Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 62
-rw-r--r-- drivers/net/bonding/bond_alb.c | 153
-rw-r--r-- drivers/net/bonding/bond_alb.h | 1
-rw-r--r-- drivers/net/bonding/bond_debugfs.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 218
-rw-r--r-- drivers/net/bonding/bond_netlink.c | 8
-rw-r--r-- drivers/net/bonding/bond_options.c | 66
-rw-r--r-- drivers/net/bonding/bond_options.h | 2
-rw-r--r-- drivers/net/bonding/bond_procfs.c | 16
-rw-r--r-- drivers/net/bonding/bond_sysfs.c | 567
-rw-r--r-- drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r-- drivers/net/bonding/bonding.h | 143
-rw-r--r-- drivers/net/can/Kconfig | 30
-rw-r--r-- drivers/net/can/Makefile | 4
-rw-r--r-- drivers/net/can/c_can/c_can.c | 15
-rw-r--r-- drivers/net/can/c_can/c_can.h | 8
-rw-r--r-- drivers/net/can/c_can/c_can_pci.c | 78
-rw-r--r-- drivers/net/can/c_can/c_can_platform.c | 84
-rw-r--r-- drivers/net/can/mscan/Kconfig | 2
-rw-r--r-- drivers/net/can/rcar_can.c | 876
-rw-r--r-- drivers/net/can/softing/softing_main.c | 20
-rw-r--r-- drivers/net/can/spi/Kconfig | 10
-rw-r--r-- drivers/net/can/spi/Makefile | 8
-rw-r--r-- drivers/net/can/spi/mcp251x.c (renamed from drivers/net/can/mcp251x.c) | 95
-rw-r--r-- drivers/net/can/usb/Kconfig | 12
-rw-r--r-- drivers/net/can/usb/Makefile | 1
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 971
-rw-r--r-- drivers/net/can/usb/kvaser_usb.c | 53
-rw-r--r-- drivers/net/can/xilinx_can.c | 1208
-rw-r--r-- drivers/net/dsa/mv88e6123_61_65.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6131.c | 4
-rw-r--r-- drivers/net/dsa/mv88e6xxx.c | 12
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 4
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 2
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdma.c | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/amd/Makefile | 1
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c | 3
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/hplance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c | 6
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/Makefile | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1007
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 375
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 556
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2182
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1351
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 510
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 512
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 433
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 676
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 49
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 1654
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 678
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 104
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 263
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 32
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 67
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 323
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_cq.h | 9
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 66
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 6
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 20
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 15
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 610
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 85
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 194
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 581
-rw-r--r-- drivers/net/ethernet/ethoc.c | 6
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 13
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 661
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 22
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/xgmac_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 27
-rw-r--r-- drivers/net/ethernet/hisilicon/Makefile | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1066
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r-- drivers/net/ethernet/icplus/ipg.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 41
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 62
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 88
-rw-r--r-- drivers/net/ethernet/intel/e1000e/nvm.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 35
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 60
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 137
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 141
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_diag.c | 50
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 311
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1124
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 37
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 131
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 53
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 209
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/Makefile | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 164
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 13
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_register.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_status.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 84
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 23
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 67
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 423
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 185
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 62
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 121
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.h | 72
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 100
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_hw.h | 96
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.c | 134
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 56
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.c | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.c | 48
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.h | 49
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.c | 66
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.h | 48
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 58
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 49
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 151
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_hwmon.c | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 203
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 60
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 89
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 85
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 80
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 31
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 24
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 28
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 356
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 68
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 164
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 66
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 62
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 376
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 324
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 109
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 147
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 278
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/profile.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/reset.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 124
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8695net.c | 6
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 87
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/ns83820.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 15
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 22
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 16
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 8
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/packetengines/hamachi.c | 6
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 36
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 44
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 42
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 92
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 69
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 192
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 171
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 68
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 47
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 50
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/io.h | 7
-rw-r--r-- drivers/net/ethernet/sfc/siena_sriov.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 22
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 17
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 14
-rw-r--r-- drivers/net/ethernet/ti/cpsw-phy-sel.c | 62
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 114
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 11
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 39
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 9
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 50
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 13
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2
-rw-r--r-- drivers/net/ethernet/via/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 511
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 163
-rw-r--r-- drivers/net/hyperv/netvsc.c | 529
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 129
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 193
-rw-r--r-- drivers/net/ieee802154/at86rf230.c | 133
-rw-r--r-- drivers/net/ieee802154/fakelb.c | 6
-rw-r--r-- drivers/net/ieee802154/mrf24j40.c | 33
-rw-r--r-- drivers/net/irda/Kconfig | 3
-rw-r--r-- drivers/net/irda/via-ircc.c | 7
-rw-r--r-- drivers/net/irda/w83977af_ir.c | 33
-rw-r--r-- drivers/net/macvlan.c | 262
-rw-r--r-- drivers/net/ntb_netdev.c | 3
-rw-r--r-- drivers/net/phy/Kconfig | 6
-rw-r--r-- drivers/net/phy/Makefile | 1
-rw-r--r-- drivers/net/phy/amd-xgbe-phy.c | 1357
-rw-r--r-- drivers/net/phy/at803x.c | 39
-rw-r--r-- drivers/net/phy/fixed.c | 81
-rw-r--r-- drivers/net/phy/mdio_bus.c | 73
-rw-r--r-- drivers/net/phy/micrel.c | 106
-rw-r--r-- drivers/net/phy/phy_device.c | 50
-rw-r--r-- drivers/net/phy/realtek.c | 88
-rw-r--r-- drivers/net/phy/smsc.c | 3
-rw-r--r-- drivers/net/phy/vitesse.c | 3
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 4
-rw-r--r-- drivers/net/ppp/pptp.c | 2
-rw-r--r-- drivers/net/rionet.c | 2
-rw-r--r-- drivers/net/team/team.c | 2
-rw-r--r-- drivers/net/team/team_mode_loadbalance.c | 12
-rw-r--r-- drivers/net/tun.c | 54
-rw-r--r-- drivers/net/usb/catc.c | 2
-rw-r--r-- drivers/net/usb/cdc_mbim.c | 129
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 740
-rw-r--r-- drivers/net/usb/hso.c | 2
-rw-r--r-- drivers/net/usb/huawei_cdc_ncm.c | 13
-rw-r--r-- drivers/net/usb/ipheth.c | 2
-rw-r--r-- drivers/net/usb/kaweth.c | 2
-rw-r--r-- drivers/net/usb/pegasus.c | 2
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 7
-rw-r--r-- drivers/net/usb/r8152.c | 6
-rw-r--r-- drivers/net/usb/rtl8150.c | 2
-rw-r--r-- drivers/net/virtio_net.c | 9
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 14
-rw-r--r-- drivers/net/vxlan.c | 187
-rw-r--r-- drivers/net/wan/farsync.c | 31
-rw-r--r-- drivers/net/wan/sdla.c | 4
-rw-r--r-- drivers/net/wimax/i2400m/control.c | 2
-rw-r--r-- drivers/net/wimax/i2400m/driver.c | 7
-rw-r--r-- drivers/net/wireless/at76c50x-usb.c | 180
-rw-r--r-- drivers/net/wireless/at76c50x-usb.h | 26
-rw-r--r-- drivers/net/wireless/ath/ar5523/ar5523.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/bmi.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath10k/bmi.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.c | 383
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.h | 17
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 366
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 26
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 109
-rw-r--r-- drivers/net/wireless/ath/ath10k/htc.c | 18
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.c | 42
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 37
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 587
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_tx.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 990
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 336
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.c | 183
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 90
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 104
-rw-r--r-- drivers/net/wireless/ath/ath5k/phy.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath6kl/Kconfig | 30
-rw-r--r-- drivers/net/wireless/ath/ath6kl/cfg80211.c | 16
-rw-r--r-- drivers/net/wireless/ath/ath6kl/core.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath6kl/debug.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath6kl/debug.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/hif.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath6kl/hif.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath6kl/htc_mbox.c | 23
-rw-r--r-- drivers/net/wireless/ath/ath6kl/htc_pipe.c | 10
-rw-r--r-- drivers/net/wireless/ath/ath6kl/init.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath6kl/main.c | 10
-rw-r--r-- drivers/net/wireless/ath/ath6kl/sdio.c | 17
-rw-r--r-- drivers/net/wireless/ath/ath6kl/target.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/txrx.c | 31
-rw-r--r-- drivers/net/wireless/ath/ath6kl/usb.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.c | 21
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/Makefile | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9340_initvals.h | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar953x_initvals.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ath9k.h | 16
-rw-r--r-- drivers/net/wireless/ath/ath9k/beacon.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/common-debug.c | 253
-rw-r--r-- drivers/net/wireless/ath/ath9k/common-debug.h | 72
-rw-r--r-- drivers/net/wireless/ath/ath9k/common.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.c | 214
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.h | 44
-rw-r--r-- drivers/net/wireless/ath/ath9k/dfs.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc.h | 19
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 555
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 40
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.c | 22
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 163
-rw-r--r-- drivers/net/wireless/ath/ath9k/pci.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/recv.c | 29
-rw-r--r-- drivers/net/wireless/ath/ath9k/reg.h | 3
-rw-r--r-- drivers/net/wireless/ath/carl9170/main.c | 4
-rw-r--r-- drivers/net/wireless/ath/carl9170/usb.c | 8
-rw-r--r-- drivers/net/wireless/ath/dfs_pattern_detector.c | 45
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.c | 3
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 5
-rw-r--r-- drivers/net/wireless/ath/wil6210/debugfs.c | 4
-rw-r--r-- drivers/net/wireless/ath/wil6210/interrupt.c | 2
-rw-r--r-- drivers/net/wireless/ath/wil6210/main.c | 43
-rw-r--r-- drivers/net/wireless/ath/wil6210/netdev.c | 14
-rw-r--r-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 4
-rw-r--r-- drivers/net/wireless/ath/wil6210/rx_reorder.c | 26
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 28
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil6210.h | 11
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.c | 36
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.h | 50
-rw-r--r-- drivers/net/wireless/b43/Kconfig | 42
-rw-r--r-- drivers/net/wireless/b43/b43.h | 4
-rw-r--r-- drivers/net/wireless/b43/bus.h | 10
-rw-r--r-- drivers/net/wireless/b43/main.c | 498
-rw-r--r-- drivers/net/wireless/b43/phy_common.c | 96
-rw-r--r-- drivers/net/wireless/b43/phy_common.h | 8
-rw-r--r-- drivers/net/wireless/b43/phy_g.c | 6
-rw-r--r-- drivers/net/wireless/b43/phy_n.c | 321
-rw-r--r-- drivers/net/wireless/b43/phy_n.h | 1
-rw-r--r-- drivers/net/wireless/b43/radio_2056.c | 1336
-rw-r--r-- drivers/net/wireless/b43/tables_nphy.c | 150
-rw-r--r-- drivers/net/wireless/b43/tables_nphy.h | 3
-rw-r--r-- drivers/net/wireless/b43/wa.c | 2
-rw-r--r-- drivers/net/wireless/b43/xmit.c | 2
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/Makefile | 2
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd.h | 2
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | 7
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c | 18
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 39
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 283
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 332
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/firmware.h (renamed from drivers/net/wireless/brcm80211/brcmfmac/nvram.h) | 24
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h | 24
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 80
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/nvram.c | 94
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/usb.c | 270
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 213
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/main.c | 13
-rw-r--r-- drivers/net/wireless/brcm80211/brcmutil/d11.c | 93
-rw-r--r-- drivers/net/wireless/brcm80211/include/brcmu_d11.h | 14
-rw-r--r-- drivers/net/wireless/brcm80211/include/brcmu_wifi.h | 1
-rw-r--r-- drivers/net/wireless/cw1200/sta.c | 3
-rw-r--r-- drivers/net/wireless/cw1200/sta.h | 3
-rw-r--r-- drivers/net/wireless/hostap/hostap_main.c | 2
-rw-r--r-- drivers/net/wireless/iwlegacy/3945.c | 2
-rw-r--r-- drivers/net/wireless/iwlegacy/4965-mac.c | 2
-rw-r--r-- drivers/net/wireless/iwlegacy/common.c | 3
-rw-r--r-- drivers/net/wireless/iwlegacy/common.h | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/Kconfig | 13
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/Makefile | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/calib.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/debugfs.c | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/dev.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/devices.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/led.h | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/lib.c | 19
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/mac80211.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/main.c | 39
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/power.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/rs.c | 10
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/rx.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/rxon.c | 18
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/scan.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/sta.c | 29
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/tt.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/tx.c | 24
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/ucode.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-1000.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-2000.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-6000.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-7000.c | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-8000.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-hw.h | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-config.h | 12
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-debug.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-debug.h | 41
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-drv.c | 5
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h (renamed from drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h) | 32
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fw.h | 35
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-io.c | 18
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-io.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-modparams.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 73
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-op-mode.h | 25
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-phy-db.c | 9
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-prph.h | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans.h | 60
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/Makefile | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex.c | 114
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/d3.c | 129
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 61
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/debugfs.c | 26
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h | 38
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h | 17
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 26
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h | 46
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api.h | 54
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw.c | 31
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 119
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 204
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 60
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/nvm.c | 102
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/ops.c | 97
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c | 23
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/power.c | 410
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/quota.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rs.c | 491
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rs.h | 24
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/rx.c | 45
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 33
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sf.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sta.c | 189
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/sta.h | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/time-event.c | 71
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tt.c | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tx.c | 13
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/utils.c | 87
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/drv.c | 14
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/internal.h | 33
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/rx.c | 86
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 137
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/tx.c | 197
-rw-r--r-- drivers/net/wireless/libertas/cfg.c | 7
-rw-r--r-- drivers/net/wireless/libertas/defs.h | 3
-rw-r--r-- drivers/net/wireless/libertas/rx.c | 8
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r-- drivers/net/wireless/mwifiex/11ac.c | 3
-rw-r--r-- drivers/net/wireless/mwifiex/11n.c | 45
-rw-r--r-- drivers/net/wireless/mwifiex/11n.h | 3
-rw-r--r-- drivers/net/wireless/mwifiex/11n_aggr.c | 28
-rw-r--r-- drivers/net/wireless/mwifiex/README | 7
-rw-r--r-- drivers/net/wireless/mwifiex/cfg80211.c | 19
-rw-r--r-- drivers/net/wireless/mwifiex/cmdevt.c | 2
-rw-r--r-- drivers/net/wireless/mwifiex/debugfs.c | 25
-rw-r--r-- drivers/net/wireless/mwifiex/decl.h | 8
-rw-r--r-- drivers/net/wireless/mwifiex/fw.h | 25
-rw-r--r-- drivers/net/wireless/mwifiex/ioctl.h | 2
-rw-r--r-- drivers/net/wireless/mwifiex/main.c | 2
-rw-r--r-- drivers/net/wireless/mwifiex/main.h | 26
-rw-r--r-- drivers/net/wireless/mwifiex/pcie.c | 4
-rw-r--r-- drivers/net/wireless/mwifiex/scan.c | 66
-rw-r--r-- drivers/net/wireless/mwifiex/sdio.c | 15
-rw-r--r-- drivers/net/wireless/mwifiex/sdio.h | 18
-rw-r--r-- drivers/net/wireless/mwifiex/sta_cmd.c | 7
-rw-r--r-- drivers/net/wireless/mwifiex/sta_cmdresp.c | 18
-rw-r--r-- drivers/net/wireless/mwifiex/sta_event.c | 44
-rw-r--r-- drivers/net/wireless/mwifiex/sta_rx.c | 16
-rw-r--r-- drivers/net/wireless/mwifiex/sta_tx.c | 4
-rw-r--r-- drivers/net/wireless/mwifiex/tdls.c | 97
-rw-r--r-- drivers/net/wireless/mwifiex/uap_cmd.c | 8
-rw-r--r-- drivers/net/wireless/mwifiex/usb.c | 55
-rw-r--r-- drivers/net/wireless/mwifiex/util.c | 6
-rw-r--r-- drivers/net/wireless/mwifiex/wmm.c | 22
-rw-r--r-- drivers/net/wireless/mwifiex/wmm.h | 5
-rw-r--r-- drivers/net/wireless/orinoco/hw.c | 4
-rw-r--r-- drivers/net/wireless/orinoco/hw.h | 4
-rw-r--r-- drivers/net/wireless/orinoco/orinoco_usb.c | 2
-rw-r--r-- drivers/net/wireless/orinoco/wext.c | 4
-rw-r--r-- drivers/net/wireless/p54/main.c | 3
-rw-r--r-- drivers/net/wireless/ray_cs.c | 2
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 4
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 1
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 4
-rw-r--r-- drivers/net/wireless/rsi/rsi_common.h | 2
-rw-r--r-- drivers/net/wireless/rsi/rsi_mgmt.h | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800lib.c | 15
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00.h | 3
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00mac.c | 3
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00usb.c | 6
-rw-r--r-- drivers/net/wireless/rt2x00/rt61pci.c | 10
-rw-r--r-- drivers/net/wireless/rt2x00/rt73usb.c | 10
-rw-r--r-- drivers/net/wireless/rtl818x/rtl8180/Makefile | 4
-rw-r--r-- drivers/net/wireless/rtl818x/rtl8180/dev.c | 27
-rw-r--r-- drivers/net/wireless/rtl818x/rtl8187/dev.c | 11
-rw-r--r-- drivers/net/wireless/rtl818x/rtl818x.h | 6
-rw-r--r-- drivers/net/wireless/rtlwifi/core.c | 3
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 20
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8188ee/hw.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8188ee/sw.c | 6
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 21
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/hw.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 3
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/hw.c | 20
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/hw.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/sw.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723ae/hw.c | 21
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723ae/hw.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723ae/sw.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723be/hw.c | 20
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723be/hw.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723be/sw.c | 5
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8723be/trx.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/wifi.h | 6
-rw-r--r-- drivers/net/wireless/ti/wl1251/acx.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl1251/cmd.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl1251/event.c | 5
-rw-r--r-- drivers/net/wireless/ti/wl1251/main.c | 68
-rw-r--r-- drivers/net/wireless/ti/wl1251/spi.c | 44
-rw-r--r-- drivers/net/wireless/ti/wlcore/debugfs.h | 4
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 5
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 28
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 69
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore_i.h | 4
-rw-r--r-- drivers/net/xen-netback/common.h | 107
-rw-r--r-- drivers/net/xen-netback/interface.c | 523
-rw-r--r-- drivers/net/xen-netback/netback.c | 754
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 182
-rw-r--r-- drivers/net/xen-netfront.c | 1123
692 files changed, 37654 insertions, 13603 deletions
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b667a51ed215..0dfeaf5da3f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -157,7 +157,7 @@ static inline struct aggregator *__get_first_agg(struct port *port)
rcu_read_lock();
first_slave = bond_first_slave_rcu(bond);
- agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+ agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
rcu_read_unlock();
return agg;
@@ -192,7 +192,7 @@ static inline void __enable_port(struct port *port)
{
struct slave *slave = port->slave;
- if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
+ if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
@@ -241,7 +241,7 @@ static inline int __check_agg_selection_timer(struct port *port)
*/
static inline void __get_state_machine_lock(struct port *port)
{
- spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+ spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
}
/**
@@ -250,7 +250,7 @@ static inline void __get_state_machine_lock(struct port *port)
*/
static inline void __release_state_machine_lock(struct port *port)
{
- spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+ spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
}
/**
@@ -350,7 +350,7 @@ static u8 __get_duplex(struct port *port)
static inline void __initialize_port_locks(struct slave *slave)
{
/* make sure it isn't called twice */
- spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
+ spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
}
/* Conversions */
@@ -688,8 +688,8 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
struct slave *slave;
bond_for_each_slave_rcu(bond, slave, iter)
- if (SLAVE_AD_INFO(slave).aggregator.is_active)
- return &(SLAVE_AD_INFO(slave).aggregator);
+ if (SLAVE_AD_INFO(slave)->aggregator.is_active)
+ return &(SLAVE_AD_INFO(slave)->aggregator);
return NULL;
}
@@ -1293,7 +1293,7 @@ static void ad_port_selection_logic(struct port *port)
}
/* search on all aggregators for a suitable aggregator for this port */
bond_for_each_slave(bond, slave, iter) {
- aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+ aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
/* keep a free aggregator for later use(if needed) */
if (!aggregator->lag_ports) {
@@ -1504,7 +1504,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best = (active && agg_device_up(active)) ? active : NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
- agg = &(SLAVE_AD_INFO(slave).aggregator);
+ agg = &(SLAVE_AD_INFO(slave)->aggregator);
agg->is_active = 0;
@@ -1549,7 +1549,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
best->slave ? best->slave->dev->name : "NULL");
bond_for_each_slave_rcu(bond, slave, iter) {
- agg = &(SLAVE_AD_INFO(slave).aggregator);
+ agg = &(SLAVE_AD_INFO(slave)->aggregator);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
agg->aggregator_identifier, agg->num_of_ports,
@@ -1840,16 +1840,16 @@ void bond_3ad_bind_slave(struct slave *slave)
struct aggregator *aggregator;
/* check that the slave has not been initialized yet. */
- if (SLAVE_AD_INFO(slave).port.slave != slave) {
+ if (SLAVE_AD_INFO(slave)->port.slave != slave) {
/* port initialization */
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
ad_initialize_port(port, bond->params.lacp_fast);
__initialize_port_locks(slave);
port->slave = slave;
- port->actor_port_number = SLAVE_AD_INFO(slave).id;
+ port->actor_port_number = SLAVE_AD_INFO(slave)->id;
/* key is determined according to the link speed, duplex and user key(which
* is yet not supported)
*/
@@ -1874,7 +1874,7 @@ void bond_3ad_bind_slave(struct slave *slave)
__disable_port(port);
/* aggregator initialization */
- aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+ aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
ad_initialize_agg(aggregator);
@@ -1903,8 +1903,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
struct slave *slave_iter;
struct list_head *iter;
- aggregator = &(SLAVE_AD_INFO(slave).aggregator);
- port = &(SLAVE_AD_INFO(slave).port);
+ aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -1932,7 +1932,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
(aggregator->lag_ports->next_port_in_aggregator)) {
/* find new aggregator for the related port(s) */
bond_for_each_slave(bond, slave_iter, iter) {
- new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+ new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
/* if the new aggregator is empty, or it is
* connected to our port only
*/
@@ -2010,7 +2010,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
/* find the aggregator that this port is connected to */
bond_for_each_slave(bond, slave_iter, iter) {
- temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+ temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
prev_port = NULL;
/* search the port in the aggregator's related ports */
for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2076,7 +2076,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (BOND_AD_INFO(bond).agg_select_timer &&
!(--BOND_AD_INFO(bond).agg_select_timer)) {
slave = bond_first_slave_rcu(bond);
- port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+ port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
/* select the active aggregator for the bond */
if (port) {
@@ -2094,7 +2094,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
/* for each port run the state machines */
bond_for_each_slave_rcu(bond, slave, iter) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
if (!port->slave) {
pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
bond->dev->name);
@@ -2155,7 +2155,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
if (length >= sizeof(struct lacpdu)) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
if (!port->slave) {
pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
@@ -2212,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
{
struct port *port;
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -2245,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
{
struct port *port;
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -2279,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
{
struct port *port;
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
/* if slave is null, the whole port is not initialized */
if (!port->slave) {
@@ -2347,7 +2347,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
ret = 0;
goto out;
}
- active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
+ active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
@@ -2384,7 +2384,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
struct port *port;
bond_for_each_slave_rcu(bond, slave, iter) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
if (port->aggregator && port->aggregator->is_active) {
aggregator = port->aggregator;
break;
@@ -2440,22 +2440,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
goto err_free;
}
- slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+ slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
first_ok_slave = NULL;
bond_for_each_slave_rcu(bond, slave, iter) {
- agg = SLAVE_AD_INFO(slave).port.aggregator;
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (!agg || agg->aggregator_identifier != agg_id)
continue;
if (slave_agg_no >= 0) {
- if (!first_ok_slave && SLAVE_IS_OK(slave))
+ if (!first_ok_slave && bond_slave_can_tx(slave))
first_ok_slave = slave;
slave_agg_no--;
continue;
}
- if (SLAVE_IS_OK(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
goto out;
}
@@ -2522,7 +2522,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
lacp_fast = bond->params.lacp_fast;
bond_for_each_slave(bond, slave, iter) {
- port = &(SLAVE_AD_INFO(slave).port);
+ port = &(SLAVE_AD_INFO(slave)->port);
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 93580a47cc54..76c0dade233f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -229,7 +229,7 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
/* Find the slave with the largest gap */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (SLAVE_IS_OK(slave)) {
+ if (bond_slave_can_tx(slave)) {
long long gap = compute_gap(slave);
if (max_gap < gap) {
@@ -384,7 +384,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
bool found = false;
bond_for_each_slave(bond, slave, iter) {
- if (!SLAVE_IS_OK(slave))
+ if (!bond_slave_can_tx(slave))
continue;
if (!found) {
if (!before || before->speed < slave->speed)
@@ -417,7 +417,7 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
bool found = false;
bond_for_each_slave_rcu(bond, slave, iter) {
- if (!SLAVE_IS_OK(slave))
+ if (!bond_slave_can_tx(slave))
continue;
if (!found) {
if (!before || before->speed < slave->speed)
@@ -755,7 +755,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
*/
- if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
+ if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1039,11 +1039,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
struct bonding *bond = bond_get_bond_by_slave(slave);
struct net_device *upper;
struct list_head *iter;
+ struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
/* send untagged */
alb_send_lp_vid(slave, mac_addr, 0, 0);
- /* loop through vlans and send one packet for each */
+ /* loop through all devices and see if we need to send a packet
+ * for that device.
+ */
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
@@ -1059,6 +1062,16 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
vlan_dev_vlan_id(upper));
}
}
+
+ /* If this is a macvlan device, then only send updates
+ * when strict_match is turned off.
+ */
+ if (netif_is_macvlan(upper) && !strict_match) {
+ memset(tags, 0, sizeof(tags));
+ bond_verify_device_path(bond->dev, upper, tags);
+ alb_send_lp_vid(slave, upper->dev_addr,
+ tags[0].vlan_proto, tags[0].vlan_id);
+ }
}
rcu_read_unlock();
}
@@ -1068,7 +1081,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
struct net_device *dev = slave->dev;
struct sockaddr s_addr;
- if (slave->bond->params.mode == BOND_MODE_TLB) {
+ if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
memcpy(dev->dev_addr, addr, dev->addr_len);
return 0;
}
@@ -1111,13 +1124,13 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
struct slave *slave2)
{
- int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+ int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
struct slave *disabled_slave = NULL;
ASSERT_RTNL();
/* fasten the change in the switch */
- if (SLAVE_IS_OK(slave1)) {
+ if (bond_slave_can_tx(slave1)) {
alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
@@ -1129,7 +1142,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
disabled_slave = slave1;
}
- if (SLAVE_IS_OK(slave2)) {
+ if (bond_slave_can_tx(slave2)) {
alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
@@ -1358,6 +1371,77 @@ void bond_alb_deinitialize(struct bonding *bond)
rlb_deinitialize(bond);
}
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+ struct slave *tx_slave)
+{
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct ethhdr *eth_data = eth_hdr(skb);
+
+ if (!tx_slave) {
+ /* unbalanced or unassigned, send through primary */
+ tx_slave = rcu_dereference(bond->curr_active_slave);
+ if (bond->params.tlb_dynamic_lb)
+ bond_info->unbalanced_load += skb->len;
+ }
+
+ if (tx_slave && bond_slave_can_tx(tx_slave)) {
+ if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
+ }
+
+ bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ goto out;
+ }
+
+ if (tx_slave && bond->params.tlb_dynamic_lb) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
+ }
+
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb_any(skb);
+out:
+ return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct ethhdr *eth_data;
+ struct slave *tx_slave = NULL;
+ u32 hash_index;
+
+ skb_reset_mac_header(skb);
+ eth_data = eth_hdr(skb);
+
+ /* Do not TX balance any multicast or broadcast */
+ if (!is_multicast_ether_addr(eth_data->h_dest)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPX):
+ /* In case of IPX, it will fall back to L2 hash */
+ case htons(ETH_P_IPV6):
+ hash_index = bond_xmit_hash(bond, skb);
+ if (bond->params.tlb_dynamic_lb) {
+ tx_slave = tlb_choose_channel(bond,
+ hash_index & 0xFF,
+ skb->len);
+ } else {
+ struct list_head *iter;
+ int idx = hash_index % bond->slave_cnt;
+
+ bond_for_each_slave_rcu(bond, tx_slave, iter)
+ if (--idx < 0)
+ break;
+ }
+ break;
+ }
+ }
+ return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
@@ -1366,7 +1450,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
struct slave *tx_slave = NULL;
static const __be32 ip_bcast = htonl(0xffffffff);
int hash_size = 0;
- int do_tx_balance = 1;
+ bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
struct ipv6hdr *ip6hdr;
@@ -1381,7 +1465,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
(iph->daddr == ip_bcast) ||
(iph->protocol == IPPROTO_IGMP)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
@@ -1393,7 +1477,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* that here just in case.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1401,7 +1485,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* broadcasts in IPv4.
*/
if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1411,7 +1495,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
*/
ip6hdr = ipv6_hdr(skb);
if (ipv6_addr_any(&ip6hdr->saddr)) {
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1421,7 +1505,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
case ETH_P_IPX:
if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1430,7 +1514,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
* this family since it has an "ARP" like
* mechanism
*/
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1438,12 +1522,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
- do_tx_balance = 0;
+ do_tx_balance = false;
if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
break;
default:
- do_tx_balance = 0;
+ do_tx_balance = false;
break;
}
@@ -1452,32 +1536,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
}
- if (!tx_slave) {
- /* unbalanced or unassigned, send through primary */
- tx_slave = rcu_dereference(bond->curr_active_slave);
- bond_info->unbalanced_load += skb->len;
- }
-
- if (tx_slave && SLAVE_IS_OK(tx_slave)) {
- if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- ether_addr_copy(eth_data->h_source,
- tx_slave->dev->dev_addr);
- }
-
- bond_dev_queue_xmit(bond, skb, tx_slave->dev);
- goto out;
- }
-
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
-
- /* no suitable interface, frame not sent */
- dev_kfree_skb_any(skb);
-out:
- return NETDEV_TX_OK;
+ return bond_do_alb_xmit(skb, bond, tx_slave);
}
void bond_alb_monitor(struct work_struct *work)
@@ -1514,8 +1573,10 @@ void bond_alb_monitor(struct work_struct *work)
/* If updating current_active, use all currently
* user mac addreses (!strict_match). Otherwise, only
* use mac of the slave device.
+ * In RLB mode, we always use strict matches.
*/
- strict_match = (slave != bond->curr_active_slave);
+ strict_match = (slave != bond->curr_active_slave ||
+ bond_info->rlb_enabled);
alb_send_learning_packets(slave, slave->dev->dev_addr,
strict_match);
}
@@ -1719,7 +1780,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
/* in TLB mode, the slave might flip down/up with the old dev_addr,
* and thus filter bond->dev_addr's packets, so force bond's mac
*/
- if (bond->params.mode == BOND_MODE_TLB) {
+ if (BOND_MODE(bond) == BOND_MODE_TLB) {
struct sockaddr sa;
u8 tmp_addr[ETH_ALEN];
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e09dd4bfafff..5fc76c01636c 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
void bond_alb_monitor(struct work_struct *);
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 2d3f7fa541ff..658e761c4568 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -23,7 +23,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
struct rlb_client_info *client_info;
u32 hash_index;
- if (bond->params.mode != BOND_MODE_ALB)
+ if (BOND_MODE(bond) != BOND_MODE_ALB)
return 0;
seq_printf(m, "SourceIP DestinationIP "
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d3a67896d435..04f35f960cb8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -343,7 +343,7 @@ static int bond_set_carrier(struct bonding *bond)
if (!bond_has_slaves(bond))
goto down;
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
bond_for_each_slave(bond, slave, iter) {
@@ -497,7 +497,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
struct list_head *iter;
int err = 0;
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@ -523,7 +523,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
struct list_head *iter;
int err = 0;
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
/* write lock already acquired */
if (bond->curr_active_slave) {
err = dev_set_allmulti(bond->curr_active_slave->dev,
@@ -574,7 +574,7 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* del lacpdu mc addr from mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
@@ -585,8 +585,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
/*--------------------------- Active slave change ---------------------------*/
/* Update the hardware address list and promisc/allmulti for the new and
- * old active slaves (if any). Modes that are !USES_PRIMARY keep all
- * slaves up date at all times; only the USES_PRIMARY modes need to call
+ * old active slaves (if any). Modes that are not using primary keep all
+ * slaves up-to-date at all times; only the modes that use primary need to call
* this function to swap these settings during a failover.
*/
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@ -747,7 +747,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
if (slave->link == BOND_LINK_UP)
return slave;
- if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+ if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
slave->delay < mintime) {
mintime = slave->delay;
bestslave = slave;
@@ -801,7 +801,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
pr_info("%s: making interface %s the new active one %d ms earlier\n",
bond->dev->name, new_active->dev->name,
(bond->params.updelay - new_active->delay) * bond->params.miimon);
@@ -810,20 +810,20 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
new_active->delay = 0;
new_active->link = BOND_LINK_UP;
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
pr_info("%s: making interface %s the new active one\n",
bond->dev->name, new_active->dev->name);
}
}
}
- if (USES_PRIMARY(bond->params.mode))
+ if (bond_uses_primary(bond))
bond_hw_addr_swap(bond, new_active, old_active);
if (bond_is_lb(bond)) {
@@ -838,7 +838,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
bond_set_slave_inactive_flags(old_active,
BOND_SLAVE_NOTIFY_NOW);
@@ -876,8 +876,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
* resend only if bond is brought up with the affected
* bonding modes and the retransmission is enabled */
if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
- ((USES_PRIMARY(bond->params.mode) && new_active) ||
- bond->params.mode == BOND_MODE_ROUNDROBIN)) {
+ ((bond_uses_primary(bond) && new_active) ||
+ BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
bond->igmp_retrans = bond->params.resend_igmp;
queue_delayed_work(bond->wq, &bond->mcast_work, 1);
}
@@ -958,7 +958,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
struct slave *slave;
bond_for_each_slave(bond, slave, iter)
- if (IS_UP(slave->dev))
+ if (bond_slave_is_up(slave))
slave_disable_netpoll(slave);
}
@@ -1038,6 +1038,7 @@ static void bond_compute_features(struct bonding *bond)
if (!bond_has_slaves(bond))
goto done;
+ vlan_features &= NETIF_F_ALL_FOR_ALL;
bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
@@ -1084,7 +1085,7 @@ static bool bond_should_deliver_exact_match(struct sk_buff *skb,
struct bonding *bond)
{
if (bond_is_slave_inactive(slave)) {
- if (bond->params.mode == BOND_MODE_ALB &&
+ if (BOND_MODE(bond) == BOND_MODE_ALB &&
skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
return false;
@@ -1126,7 +1127,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
skb->dev = bond->dev;
- if (bond->params.mode == BOND_MODE_ALB &&
+ if (BOND_MODE(bond) == BOND_MODE_ALB &&
bond->dev->priv_flags & IFF_BRIDGE_PORT &&
skb->pkt_type == PACKET_HOST) {
@@ -1163,6 +1164,35 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
}
+static struct slave *bond_alloc_slave(struct bonding *bond)
+{
+ struct slave *slave = NULL;
+
+ slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+ if (!slave)
+ return NULL;
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+ GFP_KERNEL);
+ if (!SLAVE_AD_INFO(slave)) {
+ kfree(slave);
+ return NULL;
+ }
+ }
+ return slave;
+}
+
+static void bond_free_slave(struct slave *slave)
+{
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ kfree(SLAVE_AD_INFO(slave));
+
+ kfree(slave);
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
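
The bond_alloc_slave()/bond_free_slave() pair introduced above keeps the 802.3ad-only ad_slave_info allocation and its teardown side by side, so the enslave error path and the release path cannot drift apart. A compact userspace model of that paired, mode-dependent alloc/free; the names and the mode constant are illustrative:

    #include <stdlib.h>

    /* Illustrative stand-ins; the real structs live in bonding.h. */
    struct ad_info_sketch { int id; };
    struct slave_sketch {
        int mode;                  /* copied from the bond at alloc time */
        struct ad_info_sketch *ad; /* only allocated in 802.3ad mode */
    };

    #define MODE_8023AD 4

    static struct slave_sketch *slave_alloc(int mode)
    {
        struct slave_sketch *s = calloc(1, sizeof(*s));

        if (!s)
            return NULL;
        s->mode = mode;
        if (mode == MODE_8023AD) {
            s->ad = calloc(1, sizeof(*s->ad));
            if (!s->ad) {          /* undo the partial allocation */
                free(s);
                return NULL;
            }
        }
        return s;
    }

    static void slave_free(struct slave_sketch *s)
    {
        if (s->mode == MODE_8023AD)
            free(s->ad);           /* mirrors the alloc-side condition */
        free(s);
    }

    int main(void)
    {
        struct slave_sketch *s = slave_alloc(MODE_8023AD);

        if (s)
            slave_free(s);
        return 0;
    }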
@@ -1269,7 +1299,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond_has_slaves(bond)) {
pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
bond_dev->name);
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
bond_dev->name);
@@ -1290,11 +1320,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
- new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+ new_slave = bond_alloc_slave(bond);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
+
+ new_slave->bond = bond;
+ new_slave->dev = slave_dev;
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1317,7 +1350,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1338,8 +1371,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_restore_mac;
}
- new_slave->bond = bond;
- new_slave->dev = slave_dev;
slave_dev->priv_flags |= IFF_BONDING;
if (bond_is_lb(bond)) {
@@ -1351,10 +1382,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}
- /* If the mode USES_PRIMARY, then the following is handled by
+ /* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/
- if (!USES_PRIMARY(bond->params.mode)) {
+ if (!bond_uses_primary(bond)) {
/* set promiscuity level to new slave */
if (bond_dev->flags & IFF_PROMISC) {
res = dev_set_promiscuity(slave_dev, 1);
@@ -1377,7 +1408,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* add lacpdu mc addr to mc list */
u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
@@ -1450,7 +1481,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->link == BOND_LINK_DOWN ? "DOWN" :
(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
- if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+ if (bond_uses_primary(bond) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
bond->primary_slave = new_slave;
@@ -1458,7 +1489,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- switch (bond->params.mode) {
+ switch (BOND_MODE(bond)) {
case BOND_MODE_ACTIVEBACKUP:
bond_set_slave_inactive_flags(new_slave,
BOND_SLAVE_NOTIFY_NOW);
@@ -1471,14 +1502,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
- SLAVE_AD_INFO(new_slave).id = 1;
+ SLAVE_AD_INFO(new_slave)->id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second;
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
- SLAVE_AD_INFO(new_slave).id =
- SLAVE_AD_INFO(prev_slave).id + 1;
+ SLAVE_AD_INFO(new_slave)->id =
+ SLAVE_AD_INFO(prev_slave)->id + 1;
}
bond_3ad_bind_slave(new_slave);
@@ -1539,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_compute_features(bond);
bond_set_carrier(bond);
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
@@ -1563,7 +1594,7 @@ err_unregister:
netdev_rx_handler_unregister(slave_dev);
err_detach:
- if (!USES_PRIMARY(bond->params.mode))
+ if (!bond_uses_primary(bond))
bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1585,7 +1616,7 @@ err_close:
err_restore_mac:
if (!bond->params.fail_over_mac ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1599,7 +1630,7 @@ err_restore_mtu:
dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free:
- kfree(new_slave);
+ bond_free_slave(new_slave);
err_undo_flags:
/* Enslave of first slave has failed and we need to fix master's mac */
@@ -1661,7 +1692,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_unbind_slave(slave);
write_unlock_bh(&bond->lock);
@@ -1676,7 +1707,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
if (!all && (!bond->params.fail_over_mac ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
@@ -1748,10 +1779,10 @@ static int __bond_release_one(struct net_device *bond_dev,
/* must do this from outside any spinlocks */
vlan_vids_del_by_dev(slave_dev, bond_dev);
- /* If the mode USES_PRIMARY, then this cases was handled above by
+ /* If the mode uses primary, then this case was handled above by
* bond_change_active_slave(..., NULL)
*/
- if (!USES_PRIMARY(bond->params.mode)) {
+ if (!bond_uses_primary(bond)) {
/* unset promiscuity level from slave
* NOTE: The NETDEV_CHANGEADDR call above may change the value
* of the IFF_PROMISC flag in the bond_dev, but we need the
@@ -1775,7 +1806,7 @@ static int __bond_release_one(struct net_device *bond_dev,
dev_close(slave_dev);
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
- bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
@@ -1786,7 +1817,7 @@ static int __bond_release_one(struct net_device *bond_dev,
slave_dev->priv_flags &= ~IFF_BONDING;
- kfree(slave);
+ bond_free_slave(slave);
return 0; /* deletion OK */
}
@@ -1821,7 +1852,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
- info->bond_mode = bond->params.mode;
+ info->bond_mode = BOND_MODE(bond);
info->miimon = bond->params.miimon;
info->num_slaves = bond->slave_cnt;
@@ -1877,7 +1908,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (slave->delay) {
pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
bond->dev->name,
- (bond->params.mode ==
+ (BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
(bond_is_active_slave(slave) ?
"active " : "backup ") : "",
@@ -1968,10 +1999,10 @@ static void bond_miimon_commit(struct bonding *bond)
slave->link = BOND_LINK_UP;
slave->last_link_up = jiffies;
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
/* prevent it from being the active one */
bond_set_backup_slave(slave);
- } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+ } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
} else if (slave != bond->primary_slave) {
@@ -1985,7 +2016,7 @@ static void bond_miimon_commit(struct bonding *bond)
slave->duplex ? "full" : "half");
/* notify ad that the link status has changed */
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(slave, BOND_LINK_UP);
if (bond_is_lb(bond))
@@ -2004,15 +2035,15 @@ static void bond_miimon_commit(struct bonding *bond)
slave->link = BOND_LINK_DOWN;
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
- bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+ BOND_MODE(bond) == BOND_MODE_8023AD)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
- if (bond->params.mode == BOND_MODE_8023AD)
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(slave,
BOND_LINK_DOWN);
@@ -2175,9 +2206,9 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
* When the path is validated, collect any vlan information in the
* path.
*/
-static bool bond_verify_device_path(struct net_device *start_dev,
- struct net_device *end_dev,
- struct bond_vlan_tag *tags)
+bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags)
{
struct net_device *upper;
struct list_head *iter;
@@ -2287,8 +2318,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
- !slave_do_arp_validate_only(bond, slave))
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+ !slave_do_arp_validate_only(bond))
slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
} else if (!is_arp) {
@@ -2456,7 +2487,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* do - all replies will be rx'ed on same link causing slaves
* to be unstable during low/no traffic periods
*/
- if (IS_UP(slave->dev))
+ if (bond_slave_is_up(slave))
bond_arp_send_all(bond, slave);
}
@@ -2678,10 +2709,10 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
bond_for_each_slave_rcu(bond, slave, iter) {
- if (!found && !before && IS_UP(slave->dev))
+ if (!found && !before && bond_slave_is_up(slave))
before = slave;
- if (found && !new_slave && IS_UP(slave->dev))
+ if (found && !new_slave && bond_slave_is_up(slave))
new_slave = slave;
/* if the link state is up at this point, we
* mark it down - this can happen if we have
@@ -2690,7 +2721,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
* one the current slave so it is still marked
* up when it is actually down
*/
- if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+ if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
slave->link = BOND_LINK_DOWN;
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2853,7 +2884,7 @@ static int bond_slave_netdev_event(unsigned long event,
bond_update_speed_duplex(slave);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
if (old_speed != slave->speed)
bond_3ad_adapter_speed_changed(slave);
if (old_duplex != slave->duplex)
@@ -2881,7 +2912,7 @@ static int bond_slave_netdev_event(unsigned long event,
break;
case NETDEV_CHANGENAME:
/* we don't care if we don't have primary set */
- if (!USES_PRIMARY(bond->params.mode) ||
+ if (!bond_uses_primary(bond) ||
!bond->params.primary[0])
break;
@@ -3011,20 +3042,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
* bond_xmit_hash - generate a hash value based on the xmit policy
* @bond: bonding device
* @skb: buffer to use for headers
- * @count: modulo value
*
* This function will extract the necessary headers from the skb buffer and use
* them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
*/
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
{
struct flow_keys flow;
u32 hash;
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
!bond_flow_dissect(bond, skb, &flow))
- return bond_eth_hash(skb) % count;
+ return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3035,7 +3064,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash % count;
+ return hash;
}
/*-------------------------- Device entry points ----------------------------*/
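
bond_xmit_hash() now returns the raw 32-bit hash and each caller applies its own reduction (bond_xmit_xor() below uses % bond->slave_cnt), which lets the TLB path reuse the same hash against a different table size. A standalone sketch of the final xor-fold plus a caller-side reduction; the flow value is a made-up stand-in for the flow_keys-derived hash:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit flow hash the way bond_xmit_hash's final xor-fold
     * does; the modulo now lives at the call site. */
    static uint32_t fold_hash(uint32_t hash)
    {
        hash ^= hash >> 16;
        hash ^= hash >> 8;
        return hash;
    }

    int main(void)
    {
        uint32_t flow = 0xdeadbeef; /* stand-in for the dissected flow hash */
        int slave_cnt = 3;          /* hypothetical bond with three slaves */

        printf("slave index: %u\n", fold_hash(flow) % slave_cnt);
        return 0;
    }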
@@ -3046,7 +3075,7 @@ static void bond_work_init_all(struct bonding *bond)
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
else
INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@ -3073,7 +3102,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond_has_slaves(bond)) {
read_lock(&bond->curr_slave_lock);
bond_for_each_slave(bond, slave, iter) {
- if (USES_PRIMARY(bond->params.mode)
+ if (bond_uses_primary(bond)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
@@ -3092,9 +3121,10 @@ static int bond_open(struct net_device *bond_dev)
/* bond_alb_initialize must be called before the timer
* is started.
*/
- if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+ if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
return -ENOMEM;
- queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ if (bond->params.tlb_dynamic_lb)
+ queue_delayed_work(bond->wq, &bond->alb_work, 0);
}
if (bond->params.miimon) /* link check interval, in milliseconds. */
@@ -3105,7 +3135,7 @@ static int bond_open(struct net_device *bond_dev)
bond->recv_probe = bond_arp_rcv;
}
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
queue_delayed_work(bond->wq, &bond->ad_work, 0);
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3310,7 +3340,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
rcu_read_lock();
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
slave = rcu_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
@@ -3464,7 +3494,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
struct list_head *iter;
int res = 0;
- if (bond->params.mode == BOND_MODE_ALB)
+ if (BOND_MODE(bond) == BOND_MODE_ALB)
return bond_alb_set_mac_address(bond_dev, addr);
@@ -3475,7 +3505,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* Returning an error causes ifenslave to fail.
*/
if (bond->params.fail_over_mac &&
- bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
@@ -3555,7 +3585,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
/* Here we start from the slave with slave_id */
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0) {
- if (slave_can_tx(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return;
}
@@ -3567,7 +3597,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
bond_for_each_slave_rcu(bond, slave, iter) {
if (--i < 0)
break;
- if (slave_can_tx(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return;
}
@@ -3624,7 +3654,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
*/
if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
slave = rcu_dereference(bond->curr_active_slave);
- if (slave && slave_can_tx(slave))
+ if (slave && bond_slave_can_tx(slave))
bond_dev_queue_xmit(bond, skb, slave->dev);
else
bond_xmit_slave_id(bond, skb, 0);
@@ -3662,7 +3692,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
+ bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
return NETDEV_TX_OK;
}
@@ -3677,7 +3707,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
bond_for_each_slave_rcu(bond, slave, iter) {
if (bond_is_last_slave(bond, slave))
break;
- if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+ if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
@@ -3689,7 +3719,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
bond_dev_queue_xmit(bond, skb2, slave->dev);
}
}
- if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+ if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
bond_dev_queue_xmit(bond, skb, slave->dev);
else
dev_kfree_skb_any(skb);
@@ -3714,7 +3744,7 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->queue_id == skb->queue_mapping) {
- if (slave_can_tx(slave)) {
+ if (bond_slave_can_tx(slave)) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return 0;
}
@@ -3755,12 +3785,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
{
struct bonding *bond = netdev_priv(dev);
- if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
- if (!bond_slave_override(bond, skb))
- return NETDEV_TX_OK;
- }
+ if (bond_should_override_tx_queue(bond) &&
+ !bond_slave_override(bond, skb))
+ return NETDEV_TX_OK;
- switch (bond->params.mode) {
+ switch (BOND_MODE(bond)) {
case BOND_MODE_ROUNDROBIN:
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
@@ -3772,12 +3801,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
case BOND_MODE_8023AD:
return bond_3ad_xmit_xor(skb, dev);
case BOND_MODE_ALB:
- case BOND_MODE_TLB:
return bond_alb_xmit(skb, dev);
+ case BOND_MODE_TLB:
+ return bond_tlb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
pr_err("%s: Error: Unknown bonding mode %d\n",
- dev->name, bond->params.mode);
+ dev->name, BOND_MODE(bond));
WARN_ON_ONCE(1);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -3817,14 +3847,14 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->port = PORT_OTHER;
- /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
+ /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
* do not need to check mode. Though link speed might not represent
* the true receive or transmit bandwidth (not all modes are symmetric)
* this is an accurate maximum.
*/
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, iter) {
- if (SLAVE_IS_OK(slave)) {
+ if (bond_slave_can_tx(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@ -3915,7 +3945,7 @@ void bond_setup(struct net_device *bond_dev)
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
- bond_dev->priv_flags |= IFF_BONDING;
+ bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* At first, we block adding VLANs. That's the only way to
@@ -3994,7 +4024,8 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
- (bond_mode != BOND_MODE_8023AD)) {
+ (bond_mode != BOND_MODE_8023AD) &&
+ (bond_mode != BOND_MODE_TLB)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
@@ -4079,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
}
/* reset values for 802.3ad/TLB/ALB */
- if (BOND_NO_USES_ARP(bond_mode)) {
+ if (!bond_mode_uses_arp(bond_mode)) {
if (!miimon) {
pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warn("Forcing miimon to 100msec\n");
@@ -4161,7 +4192,7 @@ static int bond_check_params(struct bond_params *params)
catch mistakes */
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
- IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
+ !bond_is_ip_target_ok(ip)) {
pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[i]);
arp_interval = 0;
@@ -4234,7 +4265,7 @@ static int bond_check_params(struct bond_params *params)
pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
- if (primary && !USES_PRIMARY(bond_mode)) {
+ if (primary && !bond_mode_uses_primary(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
@@ -4300,6 +4331,7 @@ static int bond_check_params(struct bond_params *params)
params->min_links = min_links;
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
+ params->tlb_dynamic_lb = 1; /* Default value */
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
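
packets_per_slave is stored together with a precomputed reciprocal so the per-packet path can replace an integer division by a multiply and shift. The sketch below shows the general trick, not the kernel's exact reciprocal_divide() implementation; the naive form can be off by one for large operands, which is why the kernel version carries extra correction state.

    #include <stdint.h>
    #include <stdio.h>

    /* Precompute ceil(2^32 / d) once; per packet, (a * r) >> 32
     * approximates a / d without a divide instruction. */
    static uint32_t recip_value(uint32_t d)
    {
        return (uint32_t)(((1ULL << 32) + d - 1) / d);
    }

    static uint32_t recip_divide(uint32_t a, uint32_t r)
    {
        return (uint32_t)(((uint64_t)a * r) >> 32);
    }

    int main(void)
    {
        uint32_t d = 3, r = recip_value(d);

        for (uint32_t a = 0; a < 10; a++)
            printf("%u / %u = %u (recip: %u)\n", a, d, a / d,
                   recip_divide(a, r));
        return 0;
    }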
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index f847e165d252..5ab3c1847e67 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -56,10 +56,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
- if (slave->bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
- agg = SLAVE_AD_INFO(slave).port.aggregator;
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
agg->aggregator_identifier))
@@ -407,7 +407,7 @@ static int bond_fill_info(struct sk_buff *skb,
unsigned int packets_per_slave;
int i, targets_added;
- if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+ if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
goto nla_put_failure;
if (slave_dev &&
@@ -505,7 +505,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.ad_select))
goto nla_put_failure;
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
if (!bond_3ad_get_active_agg_info(bond, &info)) {
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 832070298446..540e0167bf24 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -180,6 +182,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+ { "off", 0, 0},
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0}
+};
+
static const struct bond_option bond_opts[] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -200,7 +208,7 @@ static const struct bond_option bond_opts[] = {
[BOND_OPT_XMIT_HASH] = {
.id = BOND_OPT_XMIT_HASH,
.name = "xmit_hash_policy",
- .desc = "balance-xor and 802.3ad hashing method",
+ .desc = "balance-xor, 802.3ad, and tlb hashing method",
.values = bond_xmit_hashtype_tbl,
.set = bond_option_xmit_hash_policy_set
},
@@ -365,9 +373,33 @@ static const struct bond_option bond_opts[] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_slaves_set
},
+ [BOND_OPT_TLB_DYNAMIC_LB] = {
+ .id = BOND_OPT_TLB_DYNAMIC_LB,
+ .name = "tlb_dynamic_lb",
+ .desc = "Enable dynamic flow shuffling",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+ .values = bond_tlb_dynamic_lb_tbl,
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .set = bond_option_tlb_dynamic_lb_set,
+ },
{ }
};
+/* Searches for an option by name */
+const struct bond_option *bond_opt_get_by_name(const char *name)
+{
+ const struct bond_option *opt;
+ int option;
+
+ for (option = 0; option < BOND_OPT_LAST; option++) {
+ opt = bond_opt_get(option);
+ if (opt && !strcmp(opt->name, name))
+ return opt;
+ }
+
+ return NULL;
+}
+
/* Searches for a value in opt's values[] table */
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
{
@@ -641,7 +673,7 @@ const struct bond_option *bond_opt_get(unsigned int option)
int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
{
- if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
+ if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
bond->dev->name, newval->string);
/* disable arp monitoring */
@@ -662,7 +694,7 @@ int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newv
static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
struct slave *slave)
{
- return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+ return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@ -727,7 +759,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
bond->dev->name, new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
+ bond_slave_is_up(new_active)) {
pr_info("%s: Setting %s as active slave\n",
bond->dev->name, new_active->dev->name);
bond_change_active_slave(bond, new_active);
@@ -746,6 +778,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
return ret;
}
+/* There are two tricky bits here. First, if MII monitoring is activated, then
+ * we must disable ARP monitoring. Second, if the timer isn't running, we must
+ * start it.
+ */
static int bond_option_miimon_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -784,6 +820,10 @@ static int bond_option_miimon_set(struct bonding *bond,
return 0;
}
+/* Set the up and down delays. These must be multiples of the
+ * MII monitoring value, and are stored internally as the multiplier.
+ * Thus, we must translate to MS for the real world.
+ */
static int bond_option_updelay_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -842,6 +882,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
return 0;
}
+/* There are two tricky bits here. First, if ARP monitoring is activated, then
+ * we must disable MII monitoring. Second, if the ARP timer isn't running,
+ * we must start it.
+ */
static int bond_option_arp_interval_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -899,7 +943,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
__be32 *targets = bond->params.arp_targets;
int ind;
- if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ if (!bond_is_ip_target_ok(target)) {
pr_err("%s: invalid ARP target %pI4 specified for addition\n",
bond->dev->name, &target);
return -EINVAL;
@@ -944,7 +988,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
unsigned long *targets_rx;
int ind, i;
- if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+ if (!bond_is_ip_target_ok(target)) {
pr_err("%s: invalid ARP target %pI4 specified for removal\n",
bond->dev->name, &target);
return -EINVAL;
@@ -1338,3 +1382,13 @@ err_no_cmd:
ret = -EPERM;
goto out;
}
+
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+ bond->dev->name, newval->string, newval->value);
+ bond->params.tlb_dynamic_lb = newval->value;
+
+ return 0;
+}
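
bond_opt_get_by_name() is the piece that lets the sysfs code, a few files below, collapse dozens of near-identical store functions into one bonding_sysfs_store_option(): the sysfs attribute name itself selects the option. A toy model of that name-keyed dispatch; the table contents are hypothetical:

    #include <stdio.h>
    #include <string.h>

    struct option { const char *name; int id; };

    /* A miniature stand-in for the bond_opts[] table. */
    static const struct option opts[] = {
        { "miimon", 0 }, { "updelay", 1 }, { "tlb_dynamic_lb", 2 },
    };

    static const struct option *opt_get_by_name(const char *name)
    {
        for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
            if (!strcmp(opts[i].name, name))
                return &opts[i];
        return NULL;
    }

    /* One generic "store": resolve the attribute name, then hand the
     * value off to a single setter keyed by option id. */
    static int store_option(const char *attr_name, const char *value)
    {
        const struct option *opt = opt_get_by_name(attr_name);

        if (!opt)
            return -1;
        printf("set option %d (%s) = %s\n", opt->id, opt->name, value);
        return 0;
    }

    int main(void)
    {
        store_option("tlb_dynamic_lb", "off");
        return 0;
    }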
diff --git a/drivers/net/bonding/bond_options.h b/drivers/net/bonding/bond_options.h
index 12be9e1bfb0c..17ded5b29176 100644
--- a/drivers/net/bonding/bond_options.h
+++ b/drivers/net/bonding/bond_options.h
@@ -62,6 +62,7 @@ enum {
BOND_OPT_RESEND_IGMP,
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
+ BOND_OPT_TLB_DYNAMIC_LB,
BOND_OPT_LAST
};
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
struct bond_opt_value *val);
const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
/* This helper is used to initialize a bond_opt_value structure for parameter
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 013fdd0f45e9..b215b479bb3a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -72,9 +72,9 @@ static void bond_info_show_master(struct seq_file *seq)
curr = rcu_dereference(bond->curr_active_slave);
seq_printf(seq, "Bonding Mode: %s",
- bond_mode_name(bond->params.mode));
+ bond_mode_name(BOND_MODE(bond)));
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac) {
optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
@@ -83,15 +83,15 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "\n");
- if (bond->params.mode == BOND_MODE_XOR ||
- bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_XOR ||
+ BOND_MODE(bond) == BOND_MODE_8023AD) {
optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
bond->params.xmit_policy);
seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
optval->string, bond->params.xmit_policy);
}
- if (USES_PRIMARY(bond->params.mode)) {
+ if (bond_uses_primary(bond)) {
seq_printf(seq, "Primary Slave: %s",
(bond->primary_slave) ?
bond->primary_slave->dev->name : "None");
@@ -134,7 +134,7 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "\n");
}
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
seq_puts(seq, "\n802.3ad info\n");
@@ -188,9 +188,9 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct aggregator *agg
- = SLAVE_AD_INFO(slave).port.aggregator;
+ = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
seq_printf(seq, "Aggregator ID: %d\n",
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5f6babcfc26e..daed52f68ce1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -45,8 +45,7 @@
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
-/*
- * "show" function for the bond_masters attribute.
+/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
return NULL;
}
-/*
- * "store" function for the bond_masters attribute. This is what
+/* "store" function for the bond_masters attribute. This is what
* creates and deletes entire bonds.
*
* The class parameter is ignored.
*
*/
-
static ssize_t bonding_store_bonds(struct class *cls,
struct class_attribute *attr,
const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
.store = bonding_store_bonds,
};
-/*
- * Show the slaves in the current bond.
- */
+/* Generic "store" method for bonding sysfs option setting */
+static ssize_t bonding_sysfs_store_option(struct device *d,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ const struct bond_option *opt;
+ int ret;
+
+ opt = bond_opt_get_by_name(attr->attr.name);
+ if (WARN_ON(!opt))
+ return -ENOENT;
+ ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+
+/* Show the slaves in the current bond. */
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -190,62 +204,24 @@ static ssize_t bonding_show_slaves(struct device *d,
return res;
}
-
-/*
- * Set the slaves in the current bond.
- * This is supposed to be only thin wrapper for bond_enslave and bond_release.
- * All hard work should be done there.
- */
-static ssize_t bonding_store_slaves(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
- bonding_store_slaves);
+ bonding_sysfs_store_option);
-/*
- * Show and set the bonding mode. The bond interface must be down to
- * change the mode.
- */
+/* Show the bonding mode. */
static ssize_t bonding_show_mode(struct device *d,
struct device_attribute *attr, char *buf)
{
struct bonding *bond = to_bond(d);
const struct bond_opt_value *val;
- val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+ val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
-}
-
-static ssize_t bonding_store_mode(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- bonding_show_mode, bonding_store_mode);
+ bonding_show_mode, bonding_sysfs_store_option);
-/*
- * Show and set the bonding transmit hash method.
- */
+/* Show the bonding transmit hash method. */
static ssize_t bonding_show_xmit_hash(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
-
-static ssize_t bonding_store_xmit_hash(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
- bonding_show_xmit_hash, bonding_store_xmit_hash);
+ bonding_show_xmit_hash, bonding_sysfs_store_option);
-/*
- * Show and set arp_validate.
- */
+/* Show arp_validate. */
static ssize_t bonding_show_arp_validate(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
-
-static ssize_t bonding_store_arp_validate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
- bonding_store_arp_validate);
-/*
- * Show and set arp_all_targets.
- */
+ bonding_sysfs_store_option);
+
+/* Show arp_all_targets. */
static ssize_t bonding_show_arp_all_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
-
-static ssize_t bonding_store_arp_all_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
- bonding_show_arp_all_targets, bonding_store_arp_all_targets);
+ bonding_show_arp_all_targets, bonding_sysfs_store_option);
-/*
- * Show and store fail_over_mac. User only allowed to change the
- * value when there are no slaves.
- */
+/* Show fail_over_mac. */
static ssize_t bonding_show_fail_over_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
-
-static ssize_t bonding_store_fail_over_mac(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
- bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+ bonding_show_fail_over_mac, bonding_sysfs_store_option);
-/*
- * Show and set the arp timer interval. There are two tricky bits
- * here. First, if ARP monitoring is activated, then we must disable
- * MII monitoring. Second, if the ARP timer isn't running, we must
- * start it.
- */
+/* Show the arp timer interval. */
static ssize_t bonding_show_arp_interval(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
return sprintf(buf, "%d\n", bond->params.arp_interval);
}
-
-static ssize_t bonding_store_arp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
- bonding_show_arp_interval, bonding_store_arp_interval);
+ bonding_show_arp_interval, bonding_sysfs_store_option);
-/*
- * Show and set the arp targets.
- */
+/* Show the arp targets. */
static ssize_t bonding_show_arp_targets(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
return res;
}
+static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+ bonding_show_arp_targets, bonding_sysfs_store_option);
-static ssize_t bonding_store_arp_targets(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
-
-/*
- * Show and set the up and down delays. These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
- */
+/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_downdelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
- bonding_show_downdelay, bonding_store_downdelay);
+ bonding_show_downdelay, bonding_sysfs_store_option);
static ssize_t bonding_show_updelay(struct device *d,
struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
-
-static ssize_t bonding_store_updelay(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
- bonding_show_updelay, bonding_store_updelay);
+ bonding_show_updelay, bonding_sysfs_store_option);
-/*
- * Show and set the LACP interval. Interface must be down, and the mode
- * must be set to 802.3ad mode.
- */
+/* Show the LACP interval. */
static ssize_t bonding_show_lacp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
-
-static ssize_t bonding_store_lacp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
- bonding_show_lacp, bonding_store_lacp);
+ bonding_show_lacp, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
struct device_attribute *attr,
@@ -536,22 +364,8 @@ static ssize_t bonding_show_min_links(struct device *d,
return sprintf(buf, "%u\n", bond->params.min_links);
}
-
-static ssize_t bonding_store_min_links(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
- bonding_show_min_links, bonding_store_min_links);
+ bonding_show_min_links, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
-
-
-static ssize_t bonding_store_ad_select(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
- bonding_show_ad_select, bonding_store_ad_select);
+ bonding_show_ad_select, bonding_sysfs_store_option);
-/*
- * Show and set the number of peer notifications to send after a failover event.
- */
+/* Show and set the number of peer notifications to send after a failover event. */
static ssize_t bonding_show_num_peer_notif(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
bonding_show_num_peer_notif, bonding_store_num_peer_notif);
-/*
- * Show and set the MII monitor interval. There are two tricky bits
- * here. First, if MII monitoring is activated, then we must disable
- * ARP monitoring. Second, if the timer isn't running, we must
- * start it.
- */
+/* Show the MII monitor interval. */
static ssize_t bonding_show_miimon(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
return sprintf(buf, "%d\n", bond->params.miimon);
}
-
-static ssize_t bonding_store_miimon(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
- bonding_show_miimon, bonding_store_miimon);
+ bonding_show_miimon, bonding_sysfs_store_option);
-/*
- * Show and set the primary slave. The store function is much
- * simpler than bonding_store_slaves function because it only needs to
- * handle one interface name.
- * The bond must be a mode that supports a primary for this be
- * set.
- */
+/* Show the primary slave. */
static ssize_t bonding_show_primary(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
return count;
}
-
-static ssize_t bonding_store_primary(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
- bonding_show_primary, bonding_store_primary);
+ bonding_show_primary, bonding_sysfs_store_option);
-/*
- * Show and set the primary_reselect flag.
- */
+/* Show the primary_reselect flag. */
static ssize_t bonding_show_primary_reselect(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.primary_reselect);
}
-
-static ssize_t bonding_store_primary_reselect(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
- bonding_show_primary_reselect,
- bonding_store_primary_reselect);
+ bonding_show_primary_reselect, bonding_sysfs_store_option);
-/*
- * Show and set the use_carrier flag.
- */
+/* Show the use_carrier flag. */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
return sprintf(buf, "%d\n", bond->params.use_carrier);
}
-
-static ssize_t bonding_store_carrier(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
- bonding_show_carrier, bonding_store_carrier);
+ bonding_show_carrier, bonding_sysfs_store_option);
-/*
- * Show and set currently active_slave.
- */
+/* Show currently active_slave. */
static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
return count;
}
-
-static ssize_t bonding_store_active_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
- bonding_show_active_slave, bonding_store_active_slave);
-
+ bonding_show_active_slave, bonding_sysfs_store_option);
-/*
- * Show link status of the bond interface.
- */
+/* Show link status of the bond interface. */
static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
-/*
- * Show current 802.3ad aggregator ID.
- */
+/* Show current 802.3ad aggregator ID. */
static ssize_t bonding_show_ad_aggregator(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -802,7 +505,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
-/*
- * Show number of active 802.3ad ports.
- */
+/* Show number of active 802.3ad ports. */
static ssize_t bonding_show_ad_num_ports(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -824,7 +525,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
-/*
- * Show current 802.3ad actor key.
- */
+/* Show current 802.3ad actor key. */
static ssize_t bonding_show_ad_actor_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -846,7 +545,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
-/*
- * Show current 802.3ad partner key.
- */
+/* Show current 802.3ad partner key. */
static ssize_t bonding_show_ad_partner_key(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -868,7 +565,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
count = sprintf(buf, "%d\n",
bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
-/*
- * Show current 802.3ad partner mac.
- */
+/* Show current 802.3ad partner mac. */
static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -890,7 +585,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
int count = 0;
struct bonding *bond = to_bond(d);
- if (bond->params.mode == BOND_MODE_8023AD) {
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
}
static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
-/*
- * Show the queue_ids of the slaves in the current bond.
- */
+/* Show the queue_ids of the slaves in the current bond. */
static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
return res;
}
-
-/*
- * Set the queue_ids of the slaves in the current bond. The bond
- * interface must be enslaved for this to work.
- */
-static ssize_t bonding_store_queue_id(struct device *d,
- struct device_attribute *attr,
- const char *buffer, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
- bonding_store_queue_id);
+ bonding_sysfs_store_option);
-/*
- * Show and set the all_slaves_active flag.
- */
+/* Show the all_slaves_active flag. */
static ssize_t bonding_show_slaves_active(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
return sprintf(buf, "%d\n", bond->params.all_slaves_active);
}
-
-static ssize_t bonding_store_slaves_active(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
- bonding_show_slaves_active, bonding_store_slaves_active);
+ bonding_show_slaves_active, bonding_sysfs_store_option);
-/*
- * Show and set the number of IGMP membership reports to send on link failure
- */
+/* Show the number of IGMP membership reports to send on link failure. */
static ssize_t bonding_show_resend_igmp(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
return sprintf(buf, "%d\n", bond->params.resend_igmp);
}
-
-static ssize_t bonding_store_resend_igmp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
-}
-
static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
- bonding_show_resend_igmp, bonding_store_resend_igmp);
+ bonding_show_resend_igmp, bonding_sysfs_store_option);
static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
+
return sprintf(buf, "%d\n", bond->params.lp_interval);
}
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+ bonding_show_lp_interval, bonding_sysfs_store_option);
-static ssize_t bonding_store_lp_interval(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
- int ret;
-
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
-
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
- bonding_show_lp_interval, bonding_store_lp_interval);
+static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+ bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
static ssize_t bonding_show_packets_per_slave(struct device *d,
struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
-}
-
-static ssize_t bonding_store_packets_per_slave(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bonding *bond = to_bond(d);
- int ret;
- ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
- (char *)buf);
- if (!ret)
- ret = count;
-
- return ret;
+ return sprintf(buf, "%u\n", packets_per_slave);
}
-
static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
- bonding_show_packets_per_slave,
- bonding_store_packets_per_slave);
+ bonding_show_packets_per_slave, bonding_sysfs_store_option);
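Every writable attribute converted above (active_slave, queue_id, all_slaves_active, resend_igmp, lp_interval, tlb_dynamic_lb, packets_per_slave) now points at the single generic writer bonding_sysfs_store_option(), which this patch defines earlier in bond_sysfs.c, outside this hunk. A minimal sketch of its shape, assuming bond_opt_get_by_name() from bond_options.c resolves the attribute name to a bond_option:

/* Sketch of the shared store handler the DEVICE_ATTR lines above use;
 * the attribute's own name selects the option, so per-option store
 * functions become unnecessary. */
static ssize_t bonding_sysfs_store_option(struct device *d,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct bonding *bond = to_bond(d);
	const struct bond_option *opt;
	int ret;

	opt = bond_opt_get_by_name(attr->attr.name);
	if (WARN_ON(!opt))
		return -ENOENT;
	ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buf);
	if (!ret)
		ret = count;

	return ret;
}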
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_min_links.attr,
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
+ &dev_attr_tlb_dynamic_lb.attr,
NULL,
};
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
.attrs = per_bond_attrs,
};
-/*
- * Initialize sysfs. This sets up the bonding_masters file in
+/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
bn->net);
- /*
- * Permit multiple loads of the module by ignoring failures to
+ /* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
* created by second or subsequent loads of the module will
* not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
}
-/*
- * Remove /sys/class/net/bonding_masters.
- */
+/* Remove /sys/class/net/bonding_masters. */
void bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
-/*
- * Initialize sysfs for each bond. This sets up and registers
+/* Initialize sysfs for each bond. This sets up and registers
 * the 'bonding' directory for each individual bond under /sys/class/net.
*/
void bond_prepare_sysfs_group(struct bonding *bond)
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2e4eec5450c8..198677f58ce0 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -69,8 +69,8 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
{
const struct aggregator *agg;
- if (slave->bond->params.mode == BOND_MODE_8023AD) {
- agg = SLAVE_AD_INFO(slave).port.aggregator;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
return sprintf(buf, "%d\n",
agg->aggregator_identifier);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 00bea320e3b5..0b4d9cde0b05 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -41,42 +41,6 @@
#define BOND_DEFAULT_MIIMON 100
-#define IS_UP(dev) \
- ((((dev)->flags & IFF_UP) == IFF_UP) && \
- netif_running(dev) && \
- netif_carrier_ok(dev))
-
-/*
- * Checks whether slave is ready for transmit.
- */
-#define SLAVE_IS_OK(slave) \
- (((slave)->dev->flags & IFF_UP) && \
- netif_running((slave)->dev) && \
- ((slave)->link == BOND_LINK_UP) && \
- bond_is_active_slave(slave))
-
-
-#define USES_PRIMARY(mode) \
- (((mode) == BOND_MODE_ACTIVEBACKUP) || \
- ((mode) == BOND_MODE_TLB) || \
- ((mode) == BOND_MODE_ALB))
-
-#define BOND_NO_USES_ARP(mode) \
- (((mode) == BOND_MODE_8023AD) || \
- ((mode) == BOND_MODE_TLB) || \
- ((mode) == BOND_MODE_ALB))
-
-#define TX_QUEUE_OVERRIDE(mode) \
- (((mode) == BOND_MODE_ACTIVEBACKUP) || \
- ((mode) == BOND_MODE_ROUNDROBIN))
-
-#define BOND_MODE_IS_LB(mode) \
- (((mode) == BOND_MODE_TLB) || \
- ((mode) == BOND_MODE_ALB))
-
-#define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \
- ((htonl(INADDR_BROADCAST) == a) || \
- ipv4_is_zeronet(a))
/*
* Less bad way to call ioctl from within the kernel; this needs to be
* done some other way to get the call out of interrupt context.
@@ -90,6 +54,8 @@
set_fs(fs); \
res; })
+#define BOND_MODE(bond) ((bond)->params.mode)
+
/* slave list primitives */
#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
@@ -175,6 +141,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
+ int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
};
@@ -183,8 +150,6 @@ struct bond_parm_tbl {
int mode;
};
-#define BOND_MAX_MODENAME_LEN 20
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -205,7 +170,7 @@ struct slave {
u32 speed;
u16 queue_id;
u8 perm_hwaddr[ETH_ALEN];
- struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
+ struct ad_slave_info *ad_info;
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
@@ -285,14 +250,41 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
- if (!slave || !slave->bond)
- return NULL;
return slave->bond;
}
+static inline bool bond_should_override_tx_queue(struct bonding *bond)
+{
+ return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+ BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
+}
+
static inline bool bond_is_lb(const struct bonding *bond)
{
- return BOND_MODE_IS_LB(bond->params.mode);
+ return BOND_MODE(bond) == BOND_MODE_TLB ||
+ BOND_MODE(bond) == BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_arp(int mode)
+{
+ return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
+ mode != BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_primary(int mode)
+{
+ return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
+ mode == BOND_MODE_ALB;
+}
+
+static inline bool bond_uses_primary(struct bonding *bond)
+{
+ return bond_mode_uses_primary(BOND_MODE(bond));
+}
+
+static inline bool bond_slave_is_up(struct slave *slave)
+{
+ return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
}
static inline void bond_set_active_slave(struct slave *slave)
@@ -365,6 +357,12 @@ static inline bool bond_is_active_slave(struct slave *slave)
return !bond_slave_state(slave);
}
+static inline bool bond_slave_can_tx(struct slave *slave)
+{
+ return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
+ bond_is_active_slave(slave);
+}
+
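The bond_slave_is_up()/bond_slave_can_tx() pair replaces the IS_UP() and SLAVE_IS_OK() macros deleted at the top of this header with typed inlines. A hedged usage sketch, assuming only the helpers above plus the bond_for_each_slave_rcu() iterator already present in this header (the function name is illustrative):

/* Sketch only: pick the first slave that is allowed to transmit.
 * Caller holds rcu_read_lock(), as the _rcu iterator requires. */
static inline struct slave *first_tx_capable_slave(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave_rcu(bond, slave, iter)
		if (bond_slave_can_tx(slave))
			return slave;

	return NULL;
}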
#define BOND_PRI_RESELECT_ALWAYS 0
#define BOND_PRI_RESELECT_BETTER 1
#define BOND_PRI_RESELECT_FAILURE 2
@@ -396,12 +394,16 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
-static inline int slave_do_arp_validate_only(struct bonding *bond,
- struct slave *slave)
+static inline int slave_do_arp_validate_only(struct bonding *bond)
{
return bond->params.arp_validate & BOND_ARP_FILTER;
}
+static inline int bond_is_ip_target_ok(__be32 addr)
+{
+ return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
+}
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -479,16 +481,14 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
return addr;
}
-static inline bool slave_can_tx(struct slave *slave)
-{
- if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
- bond_is_active_slave(slave))
- return true;
- else
- return false;
-}
-
-struct bond_net;
+struct bond_net {
+ struct net *net; /* Associated network namespace */
+ struct list_head dev_list;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_dir;
+#endif
+ struct class_attribute class_attr_bonding_masters;
+};
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -500,7 +500,7 @@ int bond_sysfs_slave_add(struct slave *slave);
void bond_sysfs_slave_del(struct slave *slave);
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_create_debugfs(void);
@@ -516,15 +516,9 @@ void bond_netlink_fini(void);
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
struct net_device *bond_option_active_slave_get(struct bonding *bond);
const char *bond_slave_link_status(s8 link);
-
-struct bond_net {
- struct net * net; /* Associated network namespace */
- struct list_head dev_list;
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry * proc_dir;
-#endif
- struct class_attribute class_attr_bonding_masters;
-};
+bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags);
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
@@ -576,6 +570,27 @@ static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
return NULL;
}
+/* Caller must hold rcu_read_lock() for read */
+static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+ struct netdev_hw_addr *ha;
+
+ bond_for_each_slave_rcu(bond, tmp, iter)
+ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+ return true;
+
+ if (netdev_uc_empty(bond->dev))
+ return false;
+
+ netdev_for_each_uc_addr(ha, bond->dev)
+ if (ether_addr_equal_64bits(mac, ha->addr))
+ return true;
+
+ return false;
+}
+
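bond_slave_has_mac_rx() answers whether a source MAC would look local to this bond, checking slave hardware addresses first and the bond's unicast list second. A hedged usage sketch with a hypothetical caller name; the rcu_read_lock() requirement is the caller's, per the comment above:

/* Sketch only: filter frames whose source MAC belongs to this bond,
 * e.g. to ignore looped-back traffic. */
static inline bool frame_is_from_this_bond(struct bonding *bond,
					   const struct ethhdr *eth)
{
	return bond_slave_has_mac_rx(bond, eth->h_source);
}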
/* Check if the ip is present in arp ip list, or first free slot if ip == 0
* Returns -1 if not found, index if found
*/
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9e7d95dae2c7..41688229c570 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on ARM
+ depends on ARCH_AT91 || COMPILE_TEST
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
@@ -77,12 +77,6 @@ config CAN_TI_HECC
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
-config CAN_MCP251X
- tristate "Microchip MCP251x SPI CAN controllers"
- depends on SPI && HAS_DMA
- ---help---
- Driver for the Microchip MCP251x SPI CAN controllers.
-
config CAN_BFIN
depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
@@ -110,7 +104,7 @@ config CAN_FLEXCAN
config PCH_CAN
tristate "Intel EG20T PCH CAN controller"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
---help---
This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -125,6 +119,24 @@ config CAN_GRCAN
endian syntheses of the cores would need some modifications on
the hardware level to work.
+config CAN_RCAR
+ tristate "Renesas R-Car CAN controller"
+ depends on ARM
+ ---help---
+	  Say Y here if you want to use the CAN controller found on
+	  Renesas R-Car SoCs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_can.
+
+config CAN_XILINXCAN
+ tristate "Xilinx CAN"
+ depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on COMMON_CLK && HAS_IOMEM
+ ---help---
+ Xilinx CAN driver. This driver supports both soft AXI CAN IP and
+ Zynq CANPS IP.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
@@ -133,6 +145,8 @@ source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/spi/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..1697f22353a9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
+obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -19,11 +20,12 @@ obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
-obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
obj-$(CONFIG_CAN_GRCAN) += grcan.o
+obj-$(CONFIG_CAN_RCAR) += rcar_can.o
+obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 95e04e2002da..8e78bb48f5a4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -252,8 +252,7 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj
struct c_can_priv *priv = netdev_priv(dev);
int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
- priv->write_reg(priv, reg + 1, cmd);
- priv->write_reg(priv, reg, obj);
+ priv->write_reg32(priv, reg, (cmd << 16) | obj);
for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
@@ -328,8 +327,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
change_bit(idx, &priv->tx_dir);
}
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
@@ -391,8 +389,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
- arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
+ arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
if (arb & IF_ARB_MSGXTD)
frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -424,12 +421,10 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
struct c_can_priv *priv = netdev_priv(dev);
mask |= BIT(29);
- priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
- priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
id |= IF_ARB_MSGVAL;
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index c56f1b1c11ca..99ad1aa576b0 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -78,6 +78,7 @@ enum reg {
C_CAN_INTPND2_REG,
C_CAN_MSGVAL1_REG,
C_CAN_MSGVAL2_REG,
+ C_CAN_FUNCTION_REG,
};
static const u16 reg_map_c_can[] = {
@@ -129,6 +130,7 @@ static const u16 reg_map_d_can[] = {
[C_CAN_BRPEXT_REG] = 0x0E,
[C_CAN_INT_REG] = 0x10,
[C_CAN_TEST_REG] = 0x14,
+ [C_CAN_FUNCTION_REG] = 0x18,
[C_CAN_TXRQST1_REG] = 0x88,
[C_CAN_TXRQST2_REG] = 0x8A,
[C_CAN_NEWDAT1_REG] = 0x9C,
@@ -176,8 +178,10 @@ struct c_can_priv {
atomic_t tx_active;
unsigned long tx_dir;
int last_status;
- u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
- void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+ u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+ void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
+ u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
+ void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
void __iomem *base;
const u16 *regs;
void *priv; /* for board-specific data */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index fe5f6303b584..5d11e0e4225b 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -19,9 +19,13 @@
#include "c_can.h"
+#define PCI_DEVICE_ID_PCH_CAN 0x8818
+#define PCH_PCI_SOFT_RESET 0x01fc
+
enum c_can_pci_reg_align {
C_CAN_REG_ALIGN_16,
C_CAN_REG_ALIGN_32,
+ C_CAN_REG_32,
};
struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
enum c_can_pci_reg_align reg_align;
/* Set the frequency */
unsigned int freq;
+ /* PCI bar number */
+ int bar;
+ /* Callback for reset */
+ void (*init)(const struct c_can_priv *priv, bool enable);
};
/*
@@ -39,30 +47,70 @@ struct c_can_pci_data {
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
-static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
-static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
-static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
-static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
+static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
+ enum reg index)
+{
+ return (u16)ioread32(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
+}
+
+static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ u32 val;
+
+ val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+ return val;
+}
+
+static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ priv->write_reg(priv, index + 1, val >> 16);
+ priv->write_reg(priv, index, val);
+}
+
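The composed helpers above keep a deliberate ordering: write_reg32() stores the high word before the low word. That matters for c_can_obj_update() in c_can.c, where a single 32-bit store covers the IFx command mask (high word) and command request (low word), and the command-request write is what starts the message-RAM transfer. A hedged restatement of that call pattern:

/* Sketch only: the 32-bit IFx update from c_can.c.  write_reg32()
 * writes the command mask first, then the request word that triggers
 * the transfer, so the mask is always in place when the transfer
 * starts. */
static void example_obj_update(const struct c_can_priv *priv,
			       u32 cmd, u32 obj)
{
	priv->write_reg32(priv, C_CAN_IFACE(COMREQ_REG, 0),
			  (cmd << 16) | obj);
}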
+static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
+{
+ if (enable) {
+ u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
+
+ /* write to sw reset register */
+ iowrite32(1, addr);
+ iowrite32(0, addr);
+ }
+}
+
static int c_can_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -90,7 +138,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
}
- addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ addr = pci_iomap(pdev, c_can_pci_data->bar,
+ pci_resource_len(pdev, c_can_pci_data->bar));
if (!addr) {
dev_err(&pdev->dev,
"device has no PCI memory resources, "
@@ -147,10 +196,18 @@ static int c_can_pci_probe(struct pci_dev *pdev,
priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
break;
+ case C_CAN_REG_32:
+ priv->read_reg = c_can_pci_read_reg_32bit;
+ priv->write_reg = c_can_pci_write_reg_32bit;
+ break;
default:
ret = -EINVAL;
goto out_free_c_can;
}
+ priv->read_reg32 = c_can_pci_read_reg32;
+ priv->write_reg32 = c_can_pci_write_reg32;
+
+ priv->raminit = c_can_pci_data->init;
ret = register_c_can_dev(dev);
if (ret) {
@@ -198,6 +255,15 @@ static struct c_can_pci_data c_can_sta2x11= {
.type = BOSCH_C_CAN,
.reg_align = C_CAN_REG_ALIGN_32,
 	.freq = 52000000, /* 52 MHz */
+ .bar = 0,
+};
+
+static struct c_can_pci_data c_can_pch = {
+ .type = BOSCH_C_CAN,
+ .reg_align = C_CAN_REG_32,
+ .freq = 50000000, /* 50 MHz */
+ .init = c_can_pci_reset_pch,
+ .bar = 1,
};
#define C_CAN_ID(_vend, _dev, _driverdata) { \
@@ -207,6 +273,8 @@ static struct c_can_pci_data c_can_sta2x11= {
static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
c_can_sta2x11),
+ C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
+ c_can_pch),
{},
};
static struct pci_driver c_can_pci_driver = {
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 1df0b322d1e4..824108cd9fd5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -40,6 +40,7 @@
#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
+#define DCAN_RAM_INIT_BIT (1 << 3)
static DEFINE_SPINLOCK(raminit_lock);
/*
* 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
-static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
-static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
-static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
-static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
-static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
u32 val)
{
/* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
udelay(1);
}
-static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
{
u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait(priv, ctrl, mask);
+ c_can_hw_raminit_wait_ti(priv, ctrl, mask);
if (enable) {
/* Set start bit and wait for the done bit. */
ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait(priv, ctrl, mask);
+ c_can_hw_raminit_wait_ti(priv, ctrl, mask);
}
spin_unlock(&raminit_lock);
}
+static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ u32 val;
+
+ val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+ return val;
+}
+
+static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ priv->write_reg(priv, index + 1, val >> 16);
+ priv->write_reg(priv, index, val);
+}
+
+static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ return readl(priv->base + priv->regs[index]);
+}
+
+static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ writel(val, priv->base + priv->regs[index]);
+}
+
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
+{
+ while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
+ udelay(1);
+}
+
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+ u32 ctrl;
+
+ ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
+ ctrl &= ~DCAN_RAM_INIT_BIT;
+ priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+ c_can_hw_raminit_wait(priv, ctrl);
+
+ if (enable) {
+ ctrl |= DCAN_RAM_INIT_BIT;
+ priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+ c_can_hw_raminit_wait(priv, ctrl);
+ }
+}
+
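Boards with a separate raminit control region keep the TI-specific handler (see the probe change below), while everything else falls back to the DCAN_RAM_INIT_BIT sequence above; either way the core driver only ever calls through the priv->raminit hook. A hedged sketch of that call site, matching the hook's signature in c_can.h:

/* Sketch only: how c_can.c invokes the hook around chip start/stop. */
static void example_do_raminit(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}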
static struct platform_device_id c_can_id_table[] = {
[BOSCH_C_CAN_PLATFORM] = {
.name = KBUILD_MODNAME,
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
case IORESOURCE_MEM_32BIT:
priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ priv->read_reg32 = c_can_plat_read_reg32;
+ priv->write_reg32 = c_can_plat_write_reg32;
break;
case IORESOURCE_MEM_16BIT:
default:
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ priv->read_reg32 = c_can_plat_read_reg32;
+ priv->write_reg32 = c_can_plat_write_reg32;
break;
}
break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ priv->read_reg32 = d_can_plat_read_reg32;
+ priv->write_reg32 = d_can_plat_write_reg32;
if (pdev->dev.of_node)
priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->instance = pdev->id;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	/* Not all D_CAN modules have a separate register for the D_CAN
+	 * RAM initialization. Use the default RAM init bit in the D_CAN
+	 * module if no raminit register is specified in the DT.
+	 */
+ if (!res) {
+ priv->raminit = c_can_hw_raminit;
+ break;
+ }
+
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
- priv->raminit = c_can_hw_raminit;
+ priv->raminit = c_can_hw_raminit_ti;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index f19be5269e7b..81c711719490 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on PPC || M68K
+ depends on PPC
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644
index 000000000000..5268d216ecfa
--- /dev/null
+++ b/drivers/net/can/rcar_can.c
@@ -0,0 +1,876 @@
+/* Renesas R-Car CAN device driver
+ *
+ * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/can/platform/rcar_can.h>
+
+#define RCAR_CAN_DRV_NAME "rcar_can"
+
+/* Mailbox configuration:
+ * mailbox 60 - 63 - Rx FIFO mailboxes
+ * mailbox 56 - 59 - Tx FIFO mailboxes
+ * non-FIFO mailboxes are not used
+ */
+#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */
+#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */
+#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */
+#define RCAR_CAN_FIFO_DEPTH 4
+
+/* Mailbox registers structure */
+struct rcar_can_mbox_regs {
+ u32 id; /* IDE and RTR bits, SID and EID */
+ u8 stub; /* Not used */
+ u8 dlc; /* Data Length Code - bits [0..3] */
+ u8 data[8]; /* Data Bytes */
+ u8 tsh; /* Time Stamp Higher Byte */
+ u8 tsl; /* Time Stamp Lower Byte */
+};
+
+struct rcar_can_regs {
+ struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
+ u32 mkr_2_9[8]; /* Mask Registers 2-9 */
+ u32 fidcr[2]; /* FIFO Received ID Compare Register */
+ u32 mkivlr1; /* Mask Invalid Register 1 */
+ u32 mier1; /* Mailbox Interrupt Enable Register 1 */
+ u32 mkr_0_1[2]; /* Mask Registers 0-1 */
+	u32 mkivlr0;	/* Mask Invalid Register 0 */
+ u32 mier0; /* Mailbox Interrupt Enable Register 0 */
+ u8 pad_440[0x3c0];
+ u8 mctl[64]; /* Message Control Registers */
+ u16 ctlr; /* Control Register */
+	u16 str;	/* Status Register */
+ u8 bcr[3]; /* Bit Configuration Register */
+ u8 clkr; /* Clock Select Register */
+ u8 rfcr; /* Receive FIFO Control Register */
+ u8 rfpcr; /* Receive FIFO Pointer Control Register */
+ u8 tfcr; /* Transmit FIFO Control Register */
+ u8 tfpcr; /* Transmit FIFO Pointer Control Register */
+ u8 eier; /* Error Interrupt Enable Register */
+ u8 eifr; /* Error Interrupt Factor Judge Register */
+ u8 recr; /* Receive Error Count Register */
+ u8 tecr; /* Transmit Error Count Register */
+ u8 ecsr; /* Error Code Store Register */
+ u8 cssr; /* Channel Search Support Register */
+ u8 mssr; /* Mailbox Search Status Register */
+ u8 msmr; /* Mailbox Search Mode Register */
+ u16 tsr; /* Time Stamp Register */
+ u8 afsr; /* Acceptance Filter Support Register */
+ u8 pad_857;
+ u8 tcr; /* Test Control Register */
+ u8 pad_859[7];
+ u8 ier; /* Interrupt Enable Register */
+ u8 isr; /* Interrupt Status Register */
+ u8 pad_862;
+ u8 mbsmr; /* Mailbox Search Mask Register */
+};
+
+struct rcar_can_priv {
+ struct can_priv can; /* Must be the first member! */
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct rcar_can_regs __iomem *regs;
+ struct clk *clk;
+ u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
+ u32 tx_head;
+ u32 tx_tail;
+ u8 clock_select;
+ u8 ier;
+};
+
+static const struct can_bittiming_const rcar_can_bittiming_const = {
+ .name = RCAR_CAN_DRV_NAME,
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Control Register bits */
+#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM (1 << 10)
+#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
+#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
+#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+
+/* Status Register bits */
+#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+
+/* FIFO Received ID Compare Registers 0 and 1 bits */
+#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+
+/* Receive FIFO Control Register bits */
+#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+
+/* Transmit FIFO Control Register bits */
+#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
+ /* Message Number Status Bits */
+#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_N_RX_MKREGS2 8
+
+/* Bit Configuration Register settings */
+#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
+#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
+#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
+#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+
+/* Mailbox and Mask Registers bits */
+#define RCAR_CAN_IDE (1 << 31)
+#define RCAR_CAN_RTR (1 << 30)
+#define RCAR_CAN_SID_SHIFT 18
+
+/* Mailbox Interrupt Enable Register 1 bits */
+#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+
+/* Interrupt Enable Register bits */
+#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
+/* Interrupt Status Register bits */
+#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
+
+/* Error Interrupt Enable Register bits */
+#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+
+/* Error Interrupt Factor Judge Register bits */
+#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+
+/* Error Code Store Register bits */
+#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+
+#define RCAR_CAN_NAPI_WEIGHT 4
+#define MAX_STR_READS 0x100
+
+static void tx_failure_cleanup(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
+ can_free_echo_skb(ndev, i);
+}
+
+static void rcar_can_error(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 eifr, txerr = 0, rxerr = 0;
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ eifr = readb(&priv->regs->eifr);
+ if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
+ txerr = readb(&priv->regs->tecr);
+ rxerr = readb(&priv->regs->recr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_BEIF) {
+ int rx_errors = 0, tx_errors = 0;
+ u8 ecsr;
+
+ netdev_dbg(priv->ndev, "Bus error interrupt:\n");
+ if (skb) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ }
+ ecsr = readb(&priv->regs->ecsr);
+ if (ecsr & RCAR_CAN_ECSR_ADEF) {
+ netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE0F) {
+ netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE1F) {
+ netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
+ if (ecsr & RCAR_CAN_ECSR_CEF) {
+ netdev_dbg(priv->ndev, "CRC Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (ecsr & RCAR_CAN_ECSR_AEF) {
+ netdev_dbg(priv->ndev, "ACK Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ }
+ if (ecsr & RCAR_CAN_ECSR_FEF) {
+ netdev_dbg(priv->ndev, "Form Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+ if (ecsr & RCAR_CAN_ECSR_SEF) {
+ netdev_dbg(priv->ndev, "Stuff Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ priv->can.can_stats.bus_error++;
+ ndev->stats.rx_errors += rx_errors;
+ ndev->stats.tx_errors += tx_errors;
+ writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ }
+ if (eifr & RCAR_CAN_EIFR_EWIF) {
+ netdev_dbg(priv->ndev, "Error warning interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ }
+ if (eifr & RCAR_CAN_EIFR_EPIF) {
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ if (eifr & RCAR_CAN_EIFR_BOEIF) {
+ netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
+ tx_failure_cleanup(ndev);
+ priv->ier = RCAR_CAN_IER_ERSIE;
+ writeb(priv->ier, &priv->regs->ier);
+ priv->can.state = CAN_STATE_BUS_OFF;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ }
+ if (eifr & RCAR_CAN_EIFR_ORIF) {
+ netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_OLIF) {
+ netdev_dbg(priv->ndev,
+ "Overload Frame Transmission error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+static void rcar_can_tx_done(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u8 isr;
+
+ while (1) {
+ u8 unsent = readb(&priv->regs->tfcr);
+
+ unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
+ RCAR_CAN_TFCR_TFUST_SHIFT;
+ if (priv->tx_head - priv->tx_tail <= unsent)
+ break;
+ stats->tx_packets++;
+ stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
+ RCAR_CAN_FIFO_DEPTH];
+ priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_tail++;
+ netif_wake_queue(ndev);
+ }
+ /* Clear interrupt */
+ isr = readb(&priv->regs->isr);
+ writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
+static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u8 isr;
+
+ isr = readb(&priv->regs->isr);
+ if (!(isr & priv->ier))
+ return IRQ_NONE;
+
+ if (isr & RCAR_CAN_ISR_ERSF)
+ rcar_can_error(ndev);
+
+ if (isr & RCAR_CAN_ISR_TXFF)
+ rcar_can_tx_done(ndev);
+
+ if (isr & RCAR_CAN_ISR_RXFF) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ priv->ier &= ~RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_can_set_bittiming(struct net_device *dev)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 bcr;
+
+ bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
+ RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
+ RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
+ * All the registers are big-endian but they get byte-swapped on 32-bit
+ * read/write (but not on 8-bit, contrary to the manuals)...
+ */
+ writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
+}
+
+static void rcar_can_start(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Set controller to known mode:
+ * - FIFO mailbox mode
+ * - accept all messages
+ * - overrun mode
+ * CAN is in sleep mode after MCU hardware or software reset.
+ */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ /* Go to reset mode */
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ rcar_can_set_bittiming(ndev);
+ ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
+ ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
+ /* at bus-off */
+ ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
+ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
+ writew(ctlr, &priv->regs->ctlr);
+
+ /* Accept all SID and EID */
+ writel(0, &priv->regs->mkr_2_9[6]);
+ writel(0, &priv->regs->mkr_2_9[7]);
+ /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
+ writel(0, &priv->regs->mkivlr1);
+ /* Accept all frames */
+ writel(0, &priv->regs->fidcr[0]);
+ writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
+ /* Enable and configure FIFO mailbox interrupts */
+ writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
+
+ priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
+ RCAR_CAN_IER_TXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+
+ /* Accumulate error codes */
+ writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
+ /* Enable error interrupts */
+ writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
+ (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
+ RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
+ RCAR_CAN_EIER_OLIE, &priv->regs->eier);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Go to operation mode */
+ writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
+ break;
+ }
+ /* Enable Rx and Tx FIFO */
+ writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
+ writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
+}
+
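rcar_can_start() steps the controller through the CTLR state machine: clear SLPM to leave sleep, force reset, program bit timing and the mailbox/FIFO setup while STR.RSTST is set, then clear CANM to enter operation. A hedged helper expressing the bounded STR poll the function performs inline (name is illustrative):

/* Sketch only: poll the status register for the reset-state flag,
 * giving up after MAX_STR_READS reads as the driver does above. */
static void example_wait_rstst(struct rcar_can_priv *priv, bool set)
{
	int i;

	for (i = 0; i < MAX_STR_READS; i++)
		if (!!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) == set)
			break;
}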
+static int rcar_can_open(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+ err);
+ goto out;
+ }
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto out_clock;
+ }
+ napi_enable(&priv->napi);
+ err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+ if (err) {
+		netdev_err(ndev, "error requesting interrupt %d\n", ndev->irq);
+ goto out_close;
+ }
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ rcar_can_start(ndev);
+ netif_start_queue(ndev);
+ return 0;
+out_close:
+ napi_disable(&priv->napi);
+ close_candev(ndev);
+out_clock:
+ clk_disable_unprepare(priv->clk);
+out:
+ return err;
+}
+
+static void rcar_can_stop(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Go to (force) reset mode */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ writel(0, &priv->regs->mier0);
+ writel(0, &priv->regs->mier1);
+ writeb(0, &priv->regs->ier);
+ writeb(0, &priv->regs->eier);
+ /* Go to sleep mode */
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_can_close(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ rcar_can_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ napi_disable(&priv->napi);
+ clk_disable_unprepare(priv->clk);
+ close_candev(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ return 0;
+}
+
+static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 data, i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
+ data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ else /* Standard frame format */
+ data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+
+ if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+ data |= RCAR_CAN_RTR;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++)
+ writeb(cf->data[i],
+ &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
+ }
+
+ writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
+
+ writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+
+ priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_head++;
+ /* Start Tx: write 0xff to the TFPCR register to increment
+ * the CPU-side pointer for the transmit FIFO to the next
+ * mailbox location
+ */
+ writeb(0xff, &priv->regs->tfpcr);
+ /* Stop the queue if we've filled all FIFO entries */
+ if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
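tx_head and tx_tail above are free-running u32 counters: their difference is the number of frames the hardware still owns, each frame's echo-skb slot is its counter modulo RCAR_CAN_FIFO_DEPTH, and wraparound is harmless because only the difference is ever used. A hedged restatement of the full-FIFO test:

/* Sketch only: the queue stops exactly when all RCAR_CAN_FIFO_DEPTH
 * entries are in flight; rcar_can_tx_done() restarts it as entries
 * complete. */
static bool example_tx_fifo_full(const struct rcar_can_priv *priv)
{
	return priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH;
}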
+static const struct net_device_ops rcar_can_netdev_ops = {
+ .ndo_open = rcar_can_open,
+ .ndo_stop = rcar_can_close,
+ .ndo_start_xmit = rcar_can_start_xmit,
+};
+
+static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 data;
+ u8 dlc;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
+ if (data & RCAR_CAN_IDE)
+ cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+
+ dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
+ cf->can_dlc = get_can_dlc(dlc);
+ if (data & RCAR_CAN_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (dlc = 0; dlc < cf->can_dlc; dlc++)
+ cf->data[dlc] =
+ readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+ }
+
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+}
+
+static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct rcar_can_priv *priv = container_of(napi,
+ struct rcar_can_priv, napi);
+ int num_pkts;
+
+ for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+ u8 rfcr, isr;
+
+ isr = readb(&priv->regs->isr);
+ /* Clear interrupt bit */
+ if (isr & RCAR_CAN_ISR_RXFF)
+ writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
+ rfcr = readb(&priv->regs->rfcr);
+ if (rfcr & RCAR_CAN_RFCR_RFEST)
+ break;
+ rcar_can_rx_pkt(priv);
+ /* Write 0xff to the RFPCR register to increment
+ * the CPU-side pointer for the receive FIFO
+ * to the next mailbox location
+ */
+ writeb(0xff, &priv->regs->rfpcr);
+ }
+ /* All packets processed */
+ if (num_pkts < quota) {
+ napi_complete(napi);
+ priv->ier |= RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ }
+ return num_pkts;
+}
+
+static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ rcar_can_start(ndev);
+ netif_wake_queue(ndev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rcar_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+ bec->txerr = readb(&priv->regs->tecr);
+ bec->rxerr = readb(&priv->regs->recr);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int rcar_can_probe(struct platform_device *pdev)
+{
+ struct rcar_can_platform_data *pdata;
+ struct rcar_can_priv *priv;
+ struct net_device *ndev;
+ struct resource *mem;
+ void __iomem *addr;
+ int err = -ENODEV;
+ int irq;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ goto fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "No IRQ resource\n");
+		err = irq;
+		goto fail;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto fail;
+ }
+
+ ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev() failed\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv = netdev_priv(ndev);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "cannot get clock: %d\n", err);
+ goto fail_clk;
+ }
+
+ ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->clock_select = pdata->clock_select;
+ priv->can.clock.freq = clk_get_rate(priv->clk);
+ priv->can.bittiming_const = &rcar_can_bittiming_const;
+ priv->can.do_set_mode = rcar_can_do_set_mode;
+ priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "register_candev() failed, error %d\n",
+ err);
+ goto fail_candev;
+ }
+
+ devm_can_led_init(ndev);
+
+ dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+ priv->regs, ndev->irq);
+
+ return 0;
+fail_candev:
+ netif_napi_del(&priv->napi);
+fail_clk:
+ free_candev(ndev);
+fail:
+ return err;
+}
+
+static int rcar_can_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int err;
+
+ err = clk_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ return err;
+ }
+
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+
+static struct platform_driver rcar_can_driver = {
+ .driver = {
+ .name = RCAR_CAN_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &rcar_can_pm_ops,
+ },
+ .probe = rcar_can_probe,
+ .remove = rcar_can_remove,
+};
+
+module_platform_driver(rcar_can_driver);
+
+MODULE_AUTHOR("Cogent Embedded, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 7d8c8f3672dd..bacd236ce306 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -556,15 +556,6 @@ failed:
/*
* netdev sysfs
*/
-static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *ndev = to_net_dev(dev);
- struct softing_priv *priv = netdev2softing(ndev);
-
- return sprintf(buf, "%i\n", priv->index);
-}
-
static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
return count;
}
-static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
static const struct attribute *const netdev_sysfs_attrs[] = {
- &dev_attr_channel.attr,
&dev_attr_chip.attr,
&dev_attr_output.attr,
NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
{
int ret;
- netdev->sysfs_groups[0] = &netdev_sysfs_group;
ret = register_candev(netdev);
if (ret) {
dev_alert(&netdev->dev, "register failed\n");
return ret;
}
+ if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
+ netdev_alert(netdev, "sysfs group failed\n");
+
return 0;
}
static void softing_netdev_cleanup(struct net_device *netdev)
{
+ sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
unregister_candev(netdev);
free_candev(netdev);
}
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
DEV_ATTR_RO_STR(hardware, pdat->name);
DEV_ATTR_RO(hardware_version, id.hw_version);
DEV_ATTR_RO(license, id.license);
-DEV_ATTR_RO(frequency, id.freq);
-DEV_ATTR_RO(txpending, tx.pending);
static struct attribute *softing_pdev_attrs[] = {
&dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
&dev_attr_hardware.attr,
&dev_attr_hardware_version.attr,
&dev_attr_license.attr,
- &dev_attr_frequency.attr,
- &dev_attr_txpending.attr,
NULL,
};
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644
index 000000000000..148cae5871a6
--- /dev/null
+++ b/drivers/net/can/spi/Kconfig
@@ -0,0 +1,10 @@
+menu "CAN SPI interfaces"
+ depends on SPI
+
+config CAN_MCP251X
+ tristate "Microchip MCP251x SPI CAN controllers"
+ depends on HAS_DMA
+ ---help---
+ Driver for the Microchip MCP251x SPI CAN controllers.
+
+endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644
index 000000000000..90bcacffbc65
--- /dev/null
+++ b/drivers/net/can/spi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux Controller Area Network SPI drivers.
+#
+
+obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 28c11f815245..5df239e68812 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -214,6 +214,8 @@
#define TX_ECHO_SKB_MAX 1
+#define MCP251X_OST_DELAY_MS (5)
+
#define DEVICE_NAME "mcp251x"
static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
@@ -624,50 +626,45 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ u8 reg;
int ret;
- unsigned long timeout;
+
+ /* Wait for oscillator startup timer after power up */
+ mdelay(MCP251X_OST_DELAY_MS);
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
- ret = spi_write(spi, priv->spi_tx_buf, 1);
- if (ret) {
- dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
- return -EIO;
- }
+ ret = mcp251x_spi_trans(spi, 1);
+ if (ret)
+ return ret;
+
+ /* Wait for oscillator startup timer after reset */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+ reg = mcp251x_read_reg(spi, CANSTAT);
+ if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+ return -ENODEV;
- /* Wait for reset to finish */
- timeout = jiffies + HZ;
- mdelay(10);
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
- != CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't"
- " enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
{
- int st1, st2;
+ u8 ctrl;
+ int ret;
- mcp251x_hw_reset(spi);
+ ret = mcp251x_hw_reset(spi);
+ if (ret)
+ return ret;
- /*
- * Please note that these are "magic values" based on after
- * reset defaults taken from data sheet which allows us to see
- * if we really have a chip on the bus (we avoid common all
- * zeroes or all ones situations)
- */
- st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
- st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
+ ctrl = mcp251x_read_reg(spi, CANCTRL);
+
+ dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
- dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
+ /* Check for power up default value */
+ if ((ctrl & 0x17) != 0x07)
+ return -ENODEV;
- /* Check for power up default values */
- return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
+ return 0;
}
static int mcp251x_power_enable(struct regulator *reg, int enable)
@@ -776,7 +773,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
- mdelay(10);
mcp251x_hw_reset(spi);
mcp251x_setup(net, priv, spi);
if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
@@ -955,7 +951,7 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags, DEVICE_NAME, priv);
+ flags | IRQF_ONESHOT, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
mcp251x_power_enable(priv->transceiver, 0);
@@ -1032,8 +1028,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
- int freq, ret = -ENODEV;
struct clk *clk;
+ int freq, ret;
clk = devm_clk_get(&spi->dev, NULL);
if (IS_ERR(clk)) {
@@ -1076,6 +1072,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->net = net;
priv->clk = clk;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
@@ -1088,8 +1096,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- spi_set_drvdata(spi, priv);
-
priv->spi = spi;
mutex_init(&priv->mcp_lock);
@@ -1134,20 +1140,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- /* Configure the SPI bus */
- spi->mode = spi->mode ? : SPI_MODE_0;
- if (mcp251x_is_2510(spi))
- spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
- else
- spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
- spi->bits_per_word = 8;
- spi_setup(spi);
-
/* Here is OK to not lock the MCP, no one knows about it yet */
- if (!mcp251x_hw_probe(spi)) {
- ret = -ENODEV;
+ ret = mcp251x_hw_probe(spi);
+ if (ret)
goto error_probe;
- }
+
mcp251x_hw_sleep(spi);
ret = register_candev(net);
@@ -1156,7 +1153,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
- return ret;
+ return 0;
error_probe:
if (mcp251x_enable_dma)
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index fc96a3d83ebe..a77db919363c 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,13 +13,21 @@ config CAN_ESD_USB2
This driver supports the CAN-USB/2 interface
from esd electronic system design gmbh (http://www.esd.eu).
+config CAN_GS_USB
+ tristate "Geschwister Schneider UG interfaces"
+ ---help---
+ This driver supports the Geschwister Schneider USB/CAN devices.
+	  Say Y for built-in support,
+	  or M to compile as a module (the module will be called gs_usb).
+	  If unsure, choose N.
+
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
This driver adds support for Kvaser CAN/USB devices like Kvaser
Leaf Light.
- The driver gives support for the following devices:
+ The driver provides support for the following devices:
- Kvaser Leaf Light
- Kvaser Leaf Professional HS
- Kvaser Leaf SemiPro HS
@@ -36,6 +44,8 @@ config CAN_KVASER_USB
- Kvaser Leaf Light "China"
- Kvaser BlackBird SemiPro
- Kvaser USBcan R
+ - Kvaser Leaf Light v2
+ - Kvaser Mini PCI Express HS
If unsure, say N.
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index becef460a91a..7b9a393b1ac8 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644
index 000000000000..04b0f84612f0
--- /dev/null
+++ b/drivers/net/can/usb/gs_usb.c
@@ -0,0 +1,971 @@
+/* CAN driver for Geschwister Schneider USB/CAN devices.
+ *
+ * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ *
+ * Many thanks to all socketcan devs!
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+/* Device specific constants */
+#define USB_GSUSB_1_VENDOR_ID 0x1d50
+#define USB_GSUSB_1_PRODUCT_ID 0x606f
+
+#define GSUSB_ENDPOINT_IN 1
+#define GSUSB_ENDPOINT_OUT 2
+
+/* Device specific constants */
+enum gs_usb_breq {
+ GS_USB_BREQ_HOST_FORMAT = 0,
+ GS_USB_BREQ_BITTIMING,
+ GS_USB_BREQ_MODE,
+ GS_USB_BREQ_BERR,
+ GS_USB_BREQ_BT_CONST,
+ GS_USB_BREQ_DEVICE_CONFIG
+};
+
+enum gs_can_mode {
+ /* reset a channel. turns it off */
+ GS_CAN_MODE_RESET = 0,
+ /* starts a channel */
+ GS_CAN_MODE_START
+};
+
+enum gs_can_state {
+ GS_CAN_STATE_ERROR_ACTIVE = 0,
+ GS_CAN_STATE_ERROR_WARNING,
+ GS_CAN_STATE_ERROR_PASSIVE,
+ GS_CAN_STATE_BUS_OFF,
+ GS_CAN_STATE_STOPPED,
+ GS_CAN_STATE_SLEEPING
+};
+
+/* data types passed between host and device */
+struct gs_host_config {
+ u32 byte_order;
+} __packed;
+/* All data exchanged between host and device is exchanged in host byte order,
+ * thanks to the struct gs_host_config byte_order member, which is sent first
+ * to indicate the desired byte order.
+ */
+
+struct gs_device_config {
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 icount;
+ u32 sw_version;
+ u32 hw_version;
+} __packed;
+
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
+#define GS_CAN_MODE_LOOP_BACK (1<<1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
+#define GS_CAN_MODE_ONE_SHOT (1<<3)
+
+struct gs_device_mode {
+ u32 mode;
+ u32 flags;
+} __packed;
+
+struct gs_device_state {
+ u32 state;
+ u32 rxerr;
+ u32 txerr;
+} __packed;
+
+struct gs_device_bittiming {
+ u32 prop_seg;
+ u32 phase_seg1;
+ u32 phase_seg2;
+ u32 sjw;
+ u32 brp;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
+#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
+#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
+
+struct gs_device_bt_const {
+ u32 feature;
+ u32 fclk_can;
+ u32 tseg1_min;
+ u32 tseg1_max;
+ u32 tseg2_min;
+ u32 tseg2_max;
+ u32 sjw_max;
+ u32 brp_min;
+ u32 brp_max;
+ u32 brp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW 1
+
+struct gs_host_frame {
+ u32 echo_id;
+ u32 can_id;
+
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+
+ u8 data[8];
+} __packed;
+/* The GS USB devices make use of the same flags and masks as in
+ * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
+ */
+
+/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
+#define GS_MAX_TX_URBS 10
+/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
+#define GS_MAX_RX_URBS 30
+/* Maximum number of interfaces the driver supports per device.
+ * Current hardware supports only 2 interfaces; future hardware may support more.
+ */
+#define GS_MAX_INTF 2
+
+struct gs_tx_context {
+ struct gs_can *dev;
+ unsigned int echo_id;
+};
+
+struct gs_can {
+ struct can_priv can; /* must be the first member */
+
+ struct gs_usb *parent;
+
+ struct net_device *netdev;
+ struct usb_device *udev;
+ struct usb_interface *iface;
+
+ struct can_bittiming_const bt_const;
+ unsigned int channel; /* channel number */
+
+	/* This lock prevents a race condition between xmit and receive. */
+ spinlock_t tx_ctx_lock;
+ struct gs_tx_context tx_context[GS_MAX_TX_URBS];
+
+ struct usb_anchor tx_submitted;
+ atomic_t active_tx_urbs;
+};
+
+/* usb interface struct */
+struct gs_usb {
+ struct gs_can *canch[GS_MAX_INTF];
+ struct usb_anchor rx_submitted;
+ atomic_t active_channels;
+ struct usb_device *udev;
+};
+
+/* 'allocate' a tx context.
+ * returns a valid tx context or NULL if there is no space.
+ */
+static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
+{
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+
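+	/* a context is free while its echo_id is parked at GS_MAX_TX_URBS */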
+ for (; i < GS_MAX_TX_URBS; i++) {
+ if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
+ dev->tx_context[i].echo_id = i;
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return &dev->tx_context[i];
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return NULL;
+}
+
+/* releases a tx context
+ */
+static void gs_free_tx_context(struct gs_tx_context *txc)
+{
+ txc->echo_id = GS_MAX_TX_URBS;
+}
+
+/* Get a tx context by id.
+ */
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+{
+ unsigned long flags;
+
+ if (id < GS_MAX_TX_URBS) {
+ spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+ if (dev->tx_context[id].echo_id == id) {
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return &dev->tx_context[id];
+ }
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ }
+ return NULL;
+}
+
+static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+{
+ struct gs_device_mode *dm;
+ struct usb_interface *intf = gsdev->iface;
+ int rc;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return -ENOMEM;
+
+ dm->mode = GS_CAN_MODE_RESET;
+
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ gsdev->channel,
+ 0,
+ dm,
+ sizeof(*dm),
+ 1000);
+
+ return rc;
+}
+
+static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
+{
+ struct can_device_stats *can_stats = &dev->can.can_stats;
+
+ if (cf->can_id & CAN_ERR_RESTARTED) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ can_stats->restarts++;
+ } else if (cf->can_id & CAN_ERR_BUSOFF) {
+ dev->can.state = CAN_STATE_BUS_OFF;
+ can_stats->bus_off++;
+ } else if (cf->can_id & CAN_ERR_CRTL) {
+ if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
+ (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
+ dev->can.state = CAN_STATE_ERROR_WARNING;
+ can_stats->error_warning++;
+ } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
+ (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
+ dev->can.state = CAN_STATE_ERROR_PASSIVE;
+ can_stats->error_passive++;
+ } else {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+}
+
+static void gs_usb_receive_bulk_callback(struct urb *urb)
+{
+ struct gs_usb *usbcan = urb->context;
+ struct gs_can *dev;
+ struct net_device *netdev;
+ int rc;
+ struct net_device_stats *stats;
+ struct gs_host_frame *hf = urb->transfer_buffer;
+ struct gs_tx_context *txc;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ BUG_ON(!usbcan);
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+		/* do not resubmit aborted urbs, e.g. when the device goes down */
+ return;
+ }
+
+ /* device reports out of range channel id */
+ if (hf->channel >= GS_MAX_INTF)
+ goto resubmit_urb;
+
+ dev = usbcan->canch[hf->channel];
+
+ netdev = dev->netdev;
+ stats = &netdev->stats;
+
+ if (!netif_device_present(netdev))
+ return;
+
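+	/* echo_id == -1 marks a frame received from the bus; any other
+	 * value identifies the echo of one of our own transmissions
+	 */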
+ if (hf->echo_id == -1) { /* normal rx */
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = hf->can_id;
+
+ cf->can_dlc = get_can_dlc(hf->can_dlc);
+ memcpy(cf->data, hf->data, 8);
+
+ /* ERROR frames tell us information about the controller */
+ if (hf->can_id & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += hf->can_dlc;
+
+ netif_rx(skb);
+	} else { /* echo of one of our own transmitted frames */
+ if (hf->echo_id >= GS_MAX_TX_URBS) {
+ netdev_err(netdev,
+ "Unexpected out of range echo id %d\n",
+ hf->echo_id);
+ goto resubmit_urb;
+ }
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += hf->can_dlc;
+
+ txc = gs_get_tx_context(dev, hf->echo_id);
+
+ /* bad devices send bad echo_ids. */
+ if (!txc) {
+ netdev_err(netdev,
+ "Unexpected unused echo id %d\n",
+ hf->echo_id);
+ goto resubmit_urb;
+ }
+
+ can_get_echo_skb(netdev, hf->echo_id);
+
+ gs_free_tx_context(txc);
+
+ netif_wake_queue(netdev);
+ }
+
+ if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb)
+ goto resubmit_urb;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->can_dlc = CAN_ERR_DLC;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ netif_rx(skb);
+ }
+
+ resubmit_urb:
+ usb_fill_bulk_urb(urb,
+ usbcan->udev,
+ usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+ hf,
+ sizeof(struct gs_host_frame),
+			  gs_usb_receive_bulk_callback,
+ usbcan
+ );
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+
+	/* on USB failure, take down all interfaces */
+ if (rc == -ENODEV) {
+ for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ if (usbcan->canch[rc])
+ netif_device_detach(usbcan->canch[rc]->netdev);
+ }
+ }
+}
+
+static int gs_usb_set_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+ struct usb_interface *intf = dev->iface;
+ int rc;
+ struct gs_device_bittiming *dbt;
+
+ dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+ if (!dbt)
+ return -ENOMEM;
+
+ dbt->prop_seg = bt->prop_seg;
+ dbt->phase_seg1 = bt->phase_seg1;
+ dbt->phase_seg2 = bt->phase_seg2;
+ dbt->sjw = bt->sjw;
+ dbt->brp = bt->brp;
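+
+	/* the fields travel in host byte order, as negotiated via
+	 * GS_USB_BREQ_HOST_FORMAT at probe time
+	 */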
+
+	/* send the requested bit timing to the device */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ dbt,
+ sizeof(*dbt),
+ 1000);
+
+ kfree(dbt);
+
+ if (rc < 0)
+		dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)\n",
+			rc);
+
+ return rc;
+}
+
+static void gs_usb_xmit_callback(struct urb *urb)
+{
+ struct gs_tx_context *txc = urb->context;
+ struct gs_can *dev = txc->dev;
+ struct net_device *netdev = dev->netdev;
+
+ if (urb->status)
+ netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+
+ usb_free_coherent(urb->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct net_device_stats *stats = &dev->netdev->stats;
+ struct urb *urb;
+ struct gs_host_frame *hf;
+ struct can_frame *cf;
+ int rc;
+ unsigned int idx;
+ struct gs_tx_context *txc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* find an empty context to keep track of transmission */
+ txc = gs_alloc_tx_context(dev);
+ if (!txc)
+ return NETDEV_TX_BUSY;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URB\n");
+ goto nomem_urb;
+ }
+
+ hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!hf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ goto nomem_hf;
+ }
+
+ idx = txc->echo_id;
+
+ if (idx >= GS_MAX_TX_URBS) {
+ netdev_err(netdev, "Invalid tx context %d\n", idx);
+ goto badidx;
+ }
+
+ hf->echo_id = idx;
+ hf->channel = dev->channel;
+
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cf->can_id;
+ hf->can_dlc = cf->can_dlc;
+ memcpy(hf->data, cf->data, cf->can_dlc);
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+ hf,
+ sizeof(*hf),
+ gs_usb_xmit_callback,
+ txc);
+
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->tx_submitted);
+
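+	/* hand the skb to the echo stack; it is only released back to
+	 * the stack once the device confirms transmission
+	 */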
+ can_put_echo_skb(skb, netdev, idx);
+
+ atomic_inc(&dev->active_tx_urbs);
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc)) { /* usb send failed */
+ atomic_dec(&dev->active_tx_urbs);
+
+ can_free_echo_skb(netdev, idx);
+ gs_free_tx_context(txc);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ sizeof(*hf),
+ hf,
+ urb->transfer_dma);
+
+ if (rc == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
+ stats->tx_dropped++;
+ }
+ } else {
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
+ netif_stop_queue(netdev);
+ }
+
+ /* let usb core take care of this urb */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+ badidx:
+ usb_free_coherent(dev->udev,
+ sizeof(*hf),
+ hf,
+ urb->transfer_dma);
+ nomem_hf:
+ usb_free_urb(urb);
+
+ nomem_urb:
+ gs_free_tx_context(txc);
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static int gs_can_open(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_usb *parent = dev->parent;
+ int rc, i;
+ struct gs_device_mode *dm;
+ u32 ctrlmode;
+
+ rc = open_candev(netdev);
+ if (rc)
+ return rc;
+
+ if (atomic_add_return(1, &parent->active_channels) == 1) {
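+		/* rx urbs are shared by all channels of a device, so they are
+		 * only allocated and submitted when the first channel opens
+		 */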
+ for (i = 0; i < GS_MAX_RX_URBS; i++) {
+ struct urb *urb;
+ u8 *buf;
+
+ /* alloc rx urb */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev,
+ "No memory left for URB\n");
+ return -ENOMEM;
+ }
+
+ /* alloc rx buffer */
+ buf = usb_alloc_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ /* fill, anchor, and submit rx urb */
+ usb_fill_bulk_urb(urb,
+ dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ GSUSB_ENDPOINT_IN),
+ buf,
+ sizeof(struct gs_host_frame),
+					  gs_usb_receive_bulk_callback,
+ parent);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &parent->rx_submitted);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+ if (rc) {
+ if (rc == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ netdev_err(netdev,
+ "usb_submit failed (err=%d)\n",
+ rc);
+
+				usb_unanchor_urb(urb);
+				usb_free_coherent(dev->udev,
+						  sizeof(struct gs_host_frame),
+						  buf,
+						  urb->transfer_dma);
+				usb_free_urb(urb);
+				break;
+ }
+
+ /* Drop reference,
+ * USB core will take care of freeing it
+ */
+ usb_free_urb(urb);
+ }
+ }
+
+ dm = kmalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return -ENOMEM;
+
+ /* flags */
+ ctrlmode = dev->can.ctrlmode;
+ dm->flags = 0;
+
+ if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ dm->flags |= GS_CAN_MODE_LOOP_BACK;
+ else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+
+	/* The controller is not allowed to retry TX;
+	 * this mode is unavailable on Atmel's UC3C hardware.
+	 */
+ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ dm->flags |= GS_CAN_MODE_ONE_SHOT;
+
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+ /* finally start device */
+ dm->mode = GS_CAN_MODE_START;
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ dm,
+ sizeof(*dm),
+ 1000);
+
+ if (rc < 0) {
+ netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+ kfree(dm);
+ return rc;
+ }
+
+ kfree(dm);
+
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static int gs_can_close(struct net_device *netdev)
+{
+ int rc;
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_usb *parent = dev->parent;
+
+ netif_stop_queue(netdev);
+
+ /* Stop polling */
+ if (atomic_dec_and_test(&parent->active_channels))
+ usb_kill_anchored_urbs(&parent->rx_submitted);
+
+ /* Stop sending URBs */
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ /* reset the device */
+ rc = gs_cmd_reset(parent, dev);
+ if (rc < 0)
+ netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
+
+ /* reset tx contexts */
+ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+ dev->tx_context[rc].dev = dev;
+ dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+ }
+
+ /* close the netdev */
+ close_candev(netdev);
+
+ return 0;
+}
+
+static const struct net_device_ops gs_usb_netdev_ops = {
+ .ndo_open = gs_can_open,
+ .ndo_stop = gs_can_close,
+ .ndo_start_xmit = gs_can_start_xmit,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+{
+ struct gs_can *dev;
+ struct net_device *netdev;
+ int rc;
+ struct gs_device_bt_const *bt_const;
+
+ bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+ if (!bt_const)
+ return ERR_PTR(-ENOMEM);
+
+ /* fetch bit timing constants */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ channel,
+ 0,
+ bt_const,
+ sizeof(*bt_const),
+ 1000);
+
+ if (rc < 0) {
+ dev_err(&intf->dev,
+ "Couldn't get bit timing const for channel (err=%d)\n",
+ rc);
+ kfree(bt_const);
+ return ERR_PTR(rc);
+ }
+
+ /* create netdev */
+ netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't allocate candev\n");
+ kfree(bt_const);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = netdev_priv(netdev);
+
+ netdev->netdev_ops = &gs_usb_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+
+	/* dev setup */
+ strcpy(dev->bt_const.name, "gs_usb");
+ dev->bt_const.tseg1_min = bt_const->tseg1_min;
+ dev->bt_const.tseg1_max = bt_const->tseg1_max;
+ dev->bt_const.tseg2_min = bt_const->tseg2_min;
+ dev->bt_const.tseg2_max = bt_const->tseg2_max;
+ dev->bt_const.sjw_max = bt_const->sjw_max;
+ dev->bt_const.brp_min = bt_const->brp_min;
+ dev->bt_const.brp_max = bt_const->brp_max;
+ dev->bt_const.brp_inc = bt_const->brp_inc;
+
+ dev->udev = interface_to_usbdev(intf);
+ dev->iface = intf;
+ dev->netdev = netdev;
+ dev->channel = channel;
+
+ init_usb_anchor(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+ spin_lock_init(&dev->tx_ctx_lock);
+ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+ dev->tx_context[rc].dev = dev;
+ dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+ }
+
+	/* can setup */
+ dev->can.state = CAN_STATE_STOPPED;
+ dev->can.clock.freq = bt_const->fclk_can;
+ dev->can.bittiming_const = &dev->bt_const;
+ dev->can.do_set_bittiming = gs_usb_set_bittiming;
+
+ dev->can.ctrlmode_supported = 0;
+
+ if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+
+ if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+ kfree(bt_const);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ rc = register_candev(dev->netdev);
+ if (rc) {
+ free_candev(dev->netdev);
+ dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ return dev;
+}
+
+static void gs_destroy_candev(struct gs_can *dev)
+{
+	unregister_candev(dev->netdev);
+	usb_kill_anchored_urbs(&dev->tx_submitted);
+	/* dev is the netdev's private data, so free_candev() releases it;
+	 * a separate kfree(dev) would be a double free
+	 */
+	free_candev(dev->netdev);
+}
+
+static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct gs_usb *dev;
+ int rc = -ENOMEM;
+ unsigned int icount, i;
+ struct gs_host_config *hconf;
+ struct gs_device_config *dconf;
+
+ hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
+ if (!hconf)
+ return -ENOMEM;
+
+ hconf->byte_order = 0x0000beef;
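+	/* the device inspects how this constant arrives on the wire to
+	 * learn the host's byte order (see struct gs_host_config above)
+	 */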
+
+ /* send host config */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ hconf,
+ sizeof(*hconf),
+ 1000);
+
+ kfree(hconf);
+
+ if (rc < 0) {
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
+ rc);
+ return rc;
+ }
+
+ dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
+ if (!dconf)
+ return -ENOMEM;
+
+ /* read device config */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ dconf,
+ sizeof(*dconf),
+ 1000);
+ if (rc < 0) {
+ dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
+ rc);
+
+ kfree(dconf);
+
+ return rc;
+ }
+
+	icount = dconf->icount + 1;
+
+ kfree(dconf);
+
+ dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+
+ if (icount > GS_MAX_INTF) {
+ dev_err(&intf->dev,
+ "Driver cannot handle more that %d CAN interfaces\n",
+ GS_MAX_INTF);
+ return -EINVAL;
+ }
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	init_usb_anchor(&dev->rx_submitted);
+
+ atomic_set(&dev->active_channels, 0);
+
+ usb_set_intfdata(intf, dev);
+ dev->udev = interface_to_usbdev(intf);
+
+ for (i = 0; i < icount; i++) {
+		dev->canch[i] = gs_make_candev(i, intf);
+		if (IS_ERR_OR_NULL(dev->canch[i])) {
+			/* save the error code before the slot is cleared */
+			rc = dev->canch[i] ? PTR_ERR(dev->canch[i]) : -ENOMEM;
+			/* on failure destroy previously created candevs */
+			icount = i;
+			for (i = 0; i < icount; i++) {
+				gs_destroy_candev(dev->canch[i]);
+				dev->canch[i] = NULL;
+			}
+			kfree(dev);
+			return rc;
+		}
+ dev->canch[i]->parent = dev;
+ }
+
+ return 0;
+}
+
+static void gs_usb_disconnect(struct usb_interface *intf)
+{
+	unsigned int i;
+	struct gs_usb *dev = usb_get_intfdata(intf);
+
+	usb_set_intfdata(intf, NULL);
+
+ if (!dev) {
+ dev_err(&intf->dev, "Disconnect (nodata)\n");
+ return;
+ }
+
+ for (i = 0; i < GS_MAX_INTF; i++) {
+ struct gs_can *can = dev->canch[i];
+
+ if (!can)
+ continue;
+
+ gs_destroy_candev(can);
+ }
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+}
+
+static const struct usb_device_id gs_usb_table[] = {
+ {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gs_usb_table);
+
+static struct usb_driver gs_usb_driver = {
+ .name = "gs_usb",
+ .probe = gs_usb_probe,
+ .disconnect = gs_usb_disconnect,
+ .id_table = gs_usb_table,
+};
+
+module_usb_driver(gs_usb_driver);
+
+MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
+MODULE_DESCRIPTION(
+"Socket CAN device driver for Geschwister Schneider Technologie-, "
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4ca46edc061d..541fb7a05625 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -53,6 +53,8 @@
#define USB_OEM_MERCURY_PRODUCT_ID 34
#define USB_OEM_LEAF_PRODUCT_ID 35
#define USB_CAN_R_PRODUCT_ID 39
+#define USB_LEAF_LITE_V2_PRODUCT_ID 288
+#define USB_MINI_PCIE_HS_PRODUCT_ID 289
/* USB devices features */
#define KVASER_HAS_SILENT_MODE BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
.driver_info = KVASER_HAS_TXRX_ERRORS },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
void *buf;
int actual_len;
int err;
- int pos = 0;
+ int pos;
+ unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- err = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->bulk_in->bEndpointAddress),
- buf, RX_BUFFER_SIZE, &actual_len,
- USB_RECV_TIMEOUT);
- if (err < 0)
- goto end;
+ do {
+ err = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ dev->bulk_in->bEndpointAddress),
+ buf, RX_BUFFER_SIZE, &actual_len,
+ USB_RECV_TIMEOUT);
+ if (err < 0)
+ goto end;
- while (pos <= actual_len - MSG_HEADER_LEN) {
- tmp = buf + pos;
+ pos = 0;
+ while (pos <= actual_len - MSG_HEADER_LEN) {
+ tmp = buf + pos;
- if (!tmp->len)
- break;
+ if (!tmp->len)
+ break;
- if (pos + tmp->len > actual_len) {
- dev_err(dev->udev->dev.parent, "Format error\n");
- break;
- }
+ if (pos + tmp->len > actual_len) {
+ dev_err(dev->udev->dev.parent,
+ "Format error\n");
+ break;
+ }
- if (tmp->id == id) {
- memcpy(msg, tmp, tmp->len);
- goto end;
- }
+ if (tmp->id == id) {
+ memcpy(msg, tmp, tmp->len);
+ goto end;
+ }
- pos += tmp->len;
- }
+ pos += tmp->len;
+ }
+ } while (time_before(jiffies, to));
err = -EINVAL;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
new file mode 100644
index 000000000000..5e8b5609c067
--- /dev/null
+++ b/drivers/net/can/xilinx_can.c
@@ -0,0 +1,1208 @@
+/* Xilinx CAN device driver
+ *
+ * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2009 PetaLogix. All rights reserved.
+ *
+ * Description:
+ * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+
+#define DRIVER_NAME "xilinx_can"
+
+/* CAN registers set */
+enum xcan_reg {
+ XCAN_SRR_OFFSET = 0x00, /* Software reset */
+ XCAN_MSR_OFFSET = 0x04, /* Mode select */
+ XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */
+ XCAN_BTR_OFFSET = 0x0C, /* Bit timing */
+ XCAN_ECR_OFFSET = 0x10, /* Error counter */
+ XCAN_ESR_OFFSET = 0x14, /* Error status */
+ XCAN_SR_OFFSET = 0x18, /* Status */
+ XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */
+ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */
+ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */
+ XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */
+ XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */
+ XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */
+ XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */
+ XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */
+ XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */
+ XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data Word 1 */
+ XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */
+};
+
+/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */
+#define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */
+#define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */
+#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
+#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
+#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
+#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
+#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
+#define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */
+#define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */
+#define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */
+#define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */
+#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
+#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
+#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
+#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
+#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
+#define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */
+#define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */
+#define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */
+#define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */
+#define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */
+#define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */
+#define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */
+#define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */
+#define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */
+#define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */
+#define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */
+#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
+#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
+#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
+#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
+#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
+#define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */
+#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
+#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
+
+#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
+ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
+ XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
+ XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+
+/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
+#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
+#define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */
+#define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */
+#define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */
+#define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */
+
+/* CAN frame length constants */
+#define XCAN_FRAME_MAX_DATA_LEN 8
+#define XCAN_TIMEOUT (1 * HZ)
+
+/**
+ * struct xcan_priv - CAN driver private data
+ * @can: CAN private data structure.
+ * @tx_head: Tx CAN packets ready to send on the queue
+ * @tx_tail: Tx CAN packets successfully sent on the queue
+ * @tx_max: Maximum number of packets the driver can send
+ * @napi: NAPI structure
+ * @read_reg: For reading data from CAN registers
+ * @write_reg: For writing data to CAN registers
+ * @dev: Network device data structure
+ * @reg_base: Ioremapped address to registers
+ * @irq_flags: For request_irq()
+ * @bus_clk: Pointer to struct clk
+ * @can_clk: Pointer to struct clk
+ */
+struct xcan_priv {
+ struct can_priv can;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ unsigned int tx_max;
+ struct napi_struct napi;
+ u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
+ void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
+ u32 val);
+ struct net_device *dev;
+ void __iomem *reg_base;
+ unsigned long irq_flags;
+ struct clk *bus_clk;
+ struct clk *can_clk;
+};
+
+/* CAN Bittiming constants as per Xilinx CAN specs */
+static const struct can_bittiming_const xcan_bittiming_const = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/**
+ * xcan_write_reg_le - Write a value to the device register little endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ * @val: Value to write at the Register offset
+ *
+ * Write data to the particular CAN register
+ */
+static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
+ u32 val)
+{
+ iowrite32(val, priv->reg_base + reg);
+}
+
+/**
+ * xcan_read_reg_le - Read a value from the device register little endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ *
+ * Read data from the particular CAN register
+ * Return: value read from the CAN register
+ */
+static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
+{
+ return ioread32(priv->reg_base + reg);
+}
+
+/**
+ * xcan_write_reg_be - Write a value to the device register big endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ * @val: Value to write at the Register offset
+ *
+ * Write data to the particular CAN register
+ */
+static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
+ u32 val)
+{
+ iowrite32be(val, priv->reg_base + reg);
+}
+
+/**
+ * xcan_read_reg_be - Read a value from the device register big endian
+ * @priv: Driver private data structure
+ * @reg: Register offset
+ *
+ * Read data from the particular CAN register
+ * Return: value read from the CAN register
+ */
+static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
+{
+ return ioread32be(priv->reg_base + reg);
+}
+
+/**
+ * set_reset_mode - Resets the CAN device mode
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver reset mode routine. The driver
+ * enters configuration mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int set_reset_mode(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned long timeout;
+
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+
+ timeout = jiffies + XCAN_TIMEOUT;
+ while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
+ if (time_after(jiffies, timeout)) {
+ netdev_warn(ndev, "timed out for config mode\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(500, 10000);
+ }
+
+ return 0;
+}
+
+/**
+ * xcan_set_bittiming - CAN set bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver set bittiming routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_set_bittiming(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 btr0, btr1;
+ u32 is_config_mode;
+
+ /* Check whether Xilinx CAN is in configuration mode.
+ * It cannot set bit timing if Xilinx CAN is not in configuration mode.
+ */
+ is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_CONFIG_MASK;
+ if (!is_config_mode) {
+ netdev_alert(ndev,
+ "BUG! Cannot set bittiming - CAN is not in config mode\n");
+ return -EPERM;
+ }
+
+	/* Setting Baud Rate prescaler value in BRPR Register */
+ btr0 = (bt->brp - 1);
+
+ /* Setting Time Segment 1 in BTR Register */
+ btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
+
+ /* Setting Time Segment 2 in BTR Register */
+ btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT;
+
+ /* Setting Synchronous jump width in BTR Register */
+ btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT;
+
+ priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
+ priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
+
+ netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
+ priv->read_reg(priv, XCAN_BRPR_OFFSET),
+ priv->read_reg(priv, XCAN_BTR_OFFSET));
+
+ return 0;
+}
+
+/**
+ * xcan_chip_start - This is the driver start routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver start routine. Based on the state
+ * of the CAN device, it puts the device into the proper mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_chip_start(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 err, reg_msr, reg_sr_mask;
+ unsigned long timeout;
+
+ /* Check if it is in reset mode */
+ err = set_reset_mode(ndev);
+ if (err < 0)
+ return err;
+
+ err = xcan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ /* Enable interrupts */
+ priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL);
+
+ /* Check whether it is loopback mode or normal mode */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ reg_msr = XCAN_MSR_LBACK_MASK;
+ reg_sr_mask = XCAN_SR_LBACK_MASK;
+ } else {
+ reg_msr = 0x0;
+ reg_sr_mask = XCAN_SR_NORMAL_MASK;
+ }
+
+ priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
+
+ timeout = jiffies + XCAN_TIMEOUT;
+ while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
+ if (time_after(jiffies, timeout)) {
+			netdev_warn(ndev,
+				    "timed out waiting for correct mode\n");
+ return -ETIMEDOUT;
+ }
+ }
+ netdev_dbg(ndev, "status:#x%08x\n",
+ priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+}
+
+/**
+ * xcan_do_set_mode - This sets the mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the driver state and calls the
+ * corresponding routine for the requested mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = xcan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "xcan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xcan_start_xmit - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from upper layers to initiate transmission. This
+ * function uses the next available free txbuff and populates its fields to
+ * start the transmission.
+ *
+ * Return: NETDEV_TX_OK on success, NETDEV_TX_BUSY when the TX FIFO is full
+ */
+static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 id, dlc, data[2] = {0, 0};
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ /* Check if the TX buffer is full */
+ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
+ XCAN_SR_TXFLL_MASK)) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+	/* Translate the socketCAN ID into the controller's ID register layout */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ /* Extended CAN ID format */
+ id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
+ XCAN_IDR_ID2_MASK;
+ id |= (((cf->can_id & CAN_EFF_MASK) >>
+ (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
+ XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
+
+ /* The substibute remote TX request bit should be "1"
+ * for extended frames as in the Xilinx CAN datasheet
+ */
+ id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ /* Extended frames remote TX request */
+ id |= XCAN_IDR_RTR_MASK;
+ } else {
+ /* Standard CAN ID format */
+ id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
+ XCAN_IDR_ID1_MASK;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ /* Standard frames remote TX request */
+ id |= XCAN_IDR_SRR_MASK;
+ }
+
+ dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
+
+ if (cf->can_dlc > 0)
+ data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
+ if (cf->can_dlc > 4)
+ data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+ priv->tx_head++;
+
+ /* Write the Frame to Xilinx CAN TX FIFO */
+ priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id);
+	/* If the CAN frame is an RTR frame, this write triggers transmission */
+ priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc);
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]);
+		/* If the CAN frame is a Standard/Extended frame, this
+		 * write triggers transmission
+		 */
+ priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]);
+ stats->tx_bytes += cf->can_dlc;
+ }
+
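+	/* tx_head - tx_tail counts frames written to the FIFO that have
+	 * not yet been acknowledged by a TXOK interrupt
+	 */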
+ /* Check if the TX buffer is full */
+ if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * xcan_rx - Is called from CAN isr to complete the received
+ * frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
+ * does minimal processing and invokes "netif_receive_skb" to complete further
+ * processing.
+ * Return: 1 on success and 0 on failure.
+ */
+static int xcan_rx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 id_xcan, dlc, data[2] = {0, 0};
+
+ skb = alloc_can_skb(ndev, &cf);
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ /* Read a frame from Xilinx zynq CANPS */
+ id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET);
+ dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >>
+ XCAN_DLCR_DLC_SHIFT;
+
+ /* Change Xilinx CAN data length format to socketCAN data format */
+ cf->can_dlc = get_can_dlc(dlc);
+
+ /* Change Xilinx CAN ID format to socketCAN ID format */
+ if (id_xcan & XCAN_IDR_IDE_MASK) {
+ /* The received frame is an Extended format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
+ cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
+ XCAN_IDR_ID2_SHIFT;
+ cf->can_id |= CAN_EFF_FLAG;
+ if (id_xcan & XCAN_IDR_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ /* The received frame is a standard format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
+ XCAN_IDR_ID1_SHIFT;
+ if (id_xcan & XCAN_IDR_SRR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
+ data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
+ data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+
+ /* Change Xilinx CAN data format to socketCAN data format */
+ if (cf->can_dlc > 0)
+ *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
+ if (cf->can_dlc > 4)
+ *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
+ }
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * xcan_err_interrupt - error frame Isr
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt handler. It checks
+ * the type of error and forwards an error frame to
+ * the upper layers.
+ */
+static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 err_status, status, txerr = 0, rxerr = 0;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
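+	/* the error status bits are write-one-to-clear, so write the
+	 * captured value straight back to acknowledge them
+	 */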
+ priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
+ txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+ rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+ XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+ status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if (isr & XCAN_IXR_BSOFF_MASK) {
+ priv->can.state = CAN_STATE_BUS_OFF;
+ priv->can.can_stats.bus_off++;
+ /* Leave device in Config Mode in bus-off state */
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ } else if (status & XCAN_SR_ERRWRN_MASK) {
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+
+ /* Check for Arbitration lost interrupt */
+ if (isr & XCAN_IXR_ARBLST_MASK) {
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for RX FIFO Overflow interrupt */
+ if (isr & XCAN_IXR_RXOFLW_MASK) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+
+ /* Check for error interrupt */
+ if (isr & XCAN_IXR_ERROR_MASK) {
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ }
+
+ /* Check for Ack error interrupt */
+ if (err_status & XCAN_ESR_ACKER_MASK) {
+ stats->tx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ }
+
+ /* Check for Bit error interrupt */
+ if (err_status & XCAN_ESR_BERR_MASK) {
+ stats->tx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_BIT;
+ }
+ }
+
+ /* Check for Stuff error interrupt */
+ if (err_status & XCAN_ESR_STER_MASK) {
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_STUFF;
+ }
+ }
+
+ /* Check for Form error interrupt */
+ if (err_status & XCAN_ESR_FMER_MASK) {
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_FORM;
+ }
+ }
+
+ /* Check for CRC error interrupt */
+ if (err_status & XCAN_ESR_CRCER_MASK) {
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ }
+ }
+ priv->can.can_stats.bus_error++;
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ netdev_dbg(ndev, "%s: error status register:0x%x\n",
+ __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
+}
+
+/**
+ * xcan_state_interrupt - Check the state of the CAN device
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This checks the state of the CAN device
+ * and puts the device into the appropriate state.
+ */
+static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ /* Check for Sleep interrupt if set put CAN device in sleep state */
+ if (isr & XCAN_IXR_SLP_MASK)
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ /* Check for Wake up interrupt if set put CAN device in Active state */
+ if (isr & XCAN_IXR_WKUP_MASK)
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_rx_poll - Poll routine for rx packets (NAPI)
+ * @napi: napi structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the poll routine for the rx part.
+ * It processes at most quota packets per call.
+ *
+ * Return: number of packets received
+ */
+static int xcan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr, ier;
+ int work_done = 0;
+
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
+ if (isr & XCAN_IXR_RXOK_MASK) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_RXOK_MASK);
+ work_done += xcan_rx(ndev);
+ } else {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_RXNEMP_MASK);
+ break;
+ }
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+
+ if (work_done)
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
+ if (work_done < quota) {
+ napi_complete(napi);
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
+ return work_done;
+}
+
+/**
+ * xcan_tx_interrupt - Tx Done Isr
+ * @ndev: net_device pointer
+ * @isr: Interrupt status register value
+ */
+static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ while ((priv->tx_head - priv->tx_tail > 0) &&
+ (isr & XCAN_IXR_TXOK_MASK)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ can_get_echo_skb(ndev, priv->tx_tail %
+ priv->tx_max);
+ priv->tx_tail++;
+ stats->tx_packets++;
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ netif_wake_queue(ndev);
+}
+
+/**
+ * xcan_interrupt - CAN Isr
+ * @irq: irq number
+ * @dev_id:	device id pointer
+ *
+ * This is the Xilinx CAN ISR. It checks the type of interrupt
+ * and invokes the corresponding handler.
+ *
+ * Return:
+ * IRQ_NONE - If no interrupt is pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t xcan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr, ier;
+
+ /* Get the interrupt status from Xilinx CAN */
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ if (!isr)
+ return IRQ_NONE;
+
+	/* Check the type of interrupt and process it */
+ if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
+ XCAN_IXR_WKUP_MASK));
+ xcan_state_interrupt(ndev, isr);
+ }
+
+	/* Check for Tx interrupt and process it */
+ if (isr & XCAN_IXR_TXOK_MASK)
+ xcan_tx_interrupt(ndev, isr);
+
+	/* Check for the type of error interrupt and process it */
+ if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
+ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
+ XCAN_IXR_ARBLST_MASK));
+ xcan_err_interrupt(ndev, isr);
+ }
+
+	/* Check for the type of receive interrupt and process it */
+ if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ napi_schedule(&priv->napi);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xcan_chip_stop - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It disables the
+ * interrupts and puts the device into configuration mode.
+ */
+static void xcan_chip_stop(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ier;
+
+ /* Disable interrupts and leave the can in configuration mode */
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier &= ~XCAN_INTR_ALL;
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * xcan_open - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_open(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
+ ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->can_clk);
+ if (ret) {
+ netdev_err(ndev, "unable to enable device clock\n");
+ goto err_irq;
+ }
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret) {
+ netdev_err(ndev, "unable to enable bus clock\n");
+ goto err_can_clk;
+ }
+
+ /* Set chip into reset mode */
+ ret = set_reset_mode(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "mode resetting failed!\n");
+ goto err_bus_clk;
+ }
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret)
+ goto err_bus_clk;
+
+ ret = xcan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "xcan_chip_start failed!\n");
+ goto err_candev;
+ }
+
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_candev:
+ close_candev(ndev);
+err_bus_clk:
+ clk_disable_unprepare(priv->bus_clk);
+err_can_clk:
+ clk_disable_unprepare(priv->can_clk);
+err_irq:
+ free_irq(ndev->irq, ndev);
+err:
+ return ret;
+}
+
+/**
+ * xcan_close - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int xcan_close(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ xcan_chip_stop(ndev);
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+
+ return 0;
+}
+
+/**
+ * xcan_get_berr_counter - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->can_clk);
+ if (ret)
+ goto err;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret)
+ goto err_clk;
+
+ bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+ bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+ XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(priv->can_clk);
+err:
+ return ret;
+}
+
+static const struct net_device_ops xcan_netdev_ops = {
+ .ndo_open = xcan_open,
+ .ndo_stop = xcan_close,
+ .ndo_start_xmit = xcan_start_xmit,
+};
+
+/**
+ * xcan_suspend - Suspend method for the driver
+ * @dev:	Address of the device structure
+ *
+ * Put the device into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused xcan_suspend(struct device *dev)
+{
+ struct platform_device *pdev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->bus_clk);
+ clk_disable(priv->can_clk);
+
+ return 0;
+}
+
+/**
+ * xcan_resume - Resume from suspend
+ * @dev:	Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused xcan_resume(struct device *dev)
+{
+ struct platform_device *pdev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = clk_enable(priv->bus_clk);
+ if (ret) {
+		dev_err(dev, "Cannot enable bus clock.\n");
+ return ret;
+ }
+ ret = clk_enable(priv->can_clk);
+ if (ret) {
+		dev_err(dev, "Cannot enable CAN clock.\n");
+		clk_disable(priv->bus_clk);
+ return ret;
+ }
+
+ priv->write_reg(priv, XCAN_MSR_OFFSET, 0);
+ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
+
+/**
+ * xcan_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xcan_probe(struct platform_device *pdev)
+{
+ struct resource *res; /* IO mem resources */
+ struct net_device *ndev;
+ struct xcan_priv *priv;
+ void __iomem *addr;
+	int ret;
+	u32 rx_max, tx_max;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+ if (ret < 0)
+ goto err;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max);
+ if (ret < 0)
+ goto err;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->dev = ndev;
+ priv->can.bittiming_const = &xcan_bittiming_const;
+ priv->can.do_set_mode = xcan_do_set_mode;
+ priv->can.do_get_berr_counter = xcan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_BERR_REPORTING;
+ priv->reg_base = addr;
+ priv->tx_max = tx_max;
+
+ /* Get IRQ for the device */
+ ndev->irq = platform_get_irq(pdev, 0);
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &xcan_netdev_ops;
+
+	/* Get the CAN clock (can_clk) */
+ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(&pdev->dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ /* Check for type of CAN device */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,zynq-can-1.0")) {
+ priv->bus_clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
+ }
+ } else {
+ priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(priv->bus_clk)) {
+ dev_err(&pdev->dev, "bus clock not found\n");
+ ret = PTR_ERR(priv->bus_clk);
+ goto err_free;
+ }
+ }
+
+ ret = clk_prepare_enable(priv->can_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable device clock\n");
+ goto err_free;
+ }
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable bus clock\n");
+ goto err_unprepare_disable_dev;
+ }
+
+ priv->write_reg = xcan_write_reg_le;
+ priv->read_reg = xcan_read_reg_le;
+
+ if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
+ priv->write_reg = xcan_write_reg_be;
+ priv->read_reg = xcan_read_reg_be;
+ }
+
+ priv->can.clock.freq = clk_get_rate(priv->can_clk);
+
+ netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
+ goto err_unprepare_disable_busclk;
+ }
+
+ devm_can_led_init(ndev);
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ priv->tx_max);
+
+ return 0;
+
+err_unprepare_disable_busclk:
+ clk_disable_unprepare(priv->bus_clk);
+err_unprepare_disable_dev:
+ clk_disable_unprepare(priv->can_clk);
+err_free:
+ free_candev(ndev);
+err:
+ return ret;
+}
+
+/**
+ * xcan_remove - Unregister the device after releasing the resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int xcan_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ if (set_reset_mode(ndev) < 0)
+ netdev_err(ndev, "mode resetting failed!\n");
+
+ unregister_candev(ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", },
+ { .compatible = "xlnx,axi-can-1.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
+static struct platform_driver xcan_driver = {
+ .probe = xcan_probe,
+ .remove = xcan_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .pm = &xcan_dev_pm_ops,
+ .of_match_table = xcan_of_match,
+ },
+};
+
+module_platform_driver(xcan_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx CAN interface");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 41ee5b6ae917..69c42513dd72 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index dadfafba64e9..953bc6a49e59 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int addr = REG_PORT(p);
u16 val;
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
static int mv88e6131_setup(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i;
int ret;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 17314ed9456d..9ce2146346b6 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
mutex_init(&ps->ppu_mutex);
INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int nr_stats, struct mv88e6xxx_hw_stat *stats,
int port, uint64_t *data)
{
- struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
int i;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 35df0b9e6848..a968654b631d 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
/* The EL3-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 063557e037f2..f18647c23559 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
dev->netdev_ops = &el3_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
return tc589_config(link);
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 465cc7108d8a..e13b04624ded 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+ dev->ethtool_ops = &typhoon_ethtool_ops;
/* We can handle scatter gather, up to 16 entries, and
* we can do IP checksumming (only version 4, doh...)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c399b52..1d162ccb4733 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
- if (jiffies - reset_start_time > 2 * HZ / 100) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
netdev_warn(dev, "%s: did not complete.\n", __func__);
break;
}
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
- if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ax_reset_8390(dev);
ax_NS8390_init(dev, 1);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 051349458462..edb718661850 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -68,6 +68,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e36c456..58de3339ab3c 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 171d73c1d3c2..40dbbf740331 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1517e9df5ba1..9a6991be9749 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
dev->watchdog_timeo = 5*HZ;
dev->netdev_ops = &ace_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+ dev->ethtool_ops = &ace_ethtool_ops;
/* we only display this string ONCE */
if (!boards_found)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f451cf..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -353,7 +353,6 @@ static int sgdma_async_read(struct altera_tse_private *priv)
struct sgdma_descrip __iomem *cdesc = &descbase[0];
struct sgdma_descrip __iomem *ndesc = &descbase[1];
-
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25eff7952..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -271,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
void altera_tse_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+ netdev->ethtool_ops = &tse_ethtool_ops;
}
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 562df46e0a82..bbaf36d9f5e1 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -177,4 +177,16 @@ config SUNLANCE
To compile this driver as a module, choose M here: the module
will be called sunlance.
+config AMD_XGBE
+ tristate "AMD 10GbE Ethernet driver"
+ depends on OF_NET
+ select PHYLIB
+ select AMD_XGBE_PHY
+ ---help---
+ This driver supports the AMD 10GbE Ethernet device found on an
+ AMD SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amd-xgbe.
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index cdd4301a973d..a38a2dce3eb3 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NI65) += ni65.o
obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 26efaaa5e73f..068dc7cad5fa 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initialize driver entry points */
dev->netdev_ops = &amd8111e_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
dev->irq =pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index b08101b31b8b..968b7bfac8fc 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
- struct ariadne_private *priv;
u32 serial;
int err;
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
return -ENOMEM;
}
- priv = netdev_priv(dev);
-
r1->name = dev->name;
r2->name = dev->name;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a2bd91e3d302..a78e4c136959 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
dev->base_addr = base->start;
dev->irq = irq;
dev->netdev_ops = &au1000_netdev_ops;
- SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->ethtool_ops = &au1000_ethtool_ops;
dev->watchdog_timeo = ETH_TX_TIMEOUT;
/*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c2c893..6c9de117ffc6 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
#include "hplance.h"
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
* an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers; that takes (8 + 2) * 1544 bytes.
*/
#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399dec054..0660ac5846bb 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
#include <asm/pgtable.h>
#include <asm/mvme147hw.h>
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
* an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers; that takes (8 + 2) * 1544 bytes.
*/
#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->dev_addr);
lp = netdev_priv(dev);
- lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 08569fe2b182..abf3b1581c82 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
dev->netdev_ops = &mace_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return nmclan_config(link);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000000..26cf9af1642f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+
+amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000000..bf462ee86f5c
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1007 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define DMA_DSR2 0x3028
+#define DMA_DSR3 0x302c
+#define DMA_DSR4 0x3030
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_TDC_INDEX 24
+#define DMA_AXIAWCR_TDC_WIDTH 4
+#define DMA_AXIAWCR_TDD_INDEX 28
+#define DMA_AXIAWCR_TDD_WIDTH 2
+#define DMA_DSR0_RPS_INDEX 8
+#define DMA_DSR0_RPS_WIDTH 4
+#define DMA_DSR0_TPS_INDEX 12
+#define DMA_DSR0_TPS_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
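+
+/* Illustrative only (not part of the original patch): the addressing
+ * scheme above implies that a per-channel register address can be
+ * computed as shown by this hypothetical helper:
+ *
+ *	static inline unsigned int xgbe_dma_ch_addr(unsigned int chan,
+ *						    unsigned int reg)
+ *	{
+ *		return DMA_CH_BASE + (chan * DMA_CH_INC) + reg;
+ *	}
+ *
+ * e.g. channel 2's DMA_CH_IER sits at 0x3100 + 2 * 0x80 + 0x38 = 0x3238.
+ */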
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_HTR1 0x0014
+#define MAC_HTR2 0x0018
+#define MAC_HTR3 0x001c
+#define MAC_HTR4 0x0020
+#define MAC_HTR5 0x0024
+#define MAC_HTR6 0x0028
+#define MAC_HTR7 0x002c
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
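+
+/* Illustrative only: by the same scheme, queue n's register "reg" lives
+ * at MTL_Q_BASE + (n * MTL_Q_INC) + reg, e.g. queue 1's MTL_Q_TQOMR is
+ * at 0x1100 + 0x80 + 0x00 = 0x1180.
+ */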
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_TCECR 0x10
+#define MTL_Q_TCESR 0x14
+#define MTL_Q_TCQWR 0x18
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x4c
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_TCQWR_QW_INDEX 0
+#define MTL_Q_TCQWR_QW_WIDTH 21
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RFA_INDEX 8
+#define MTL_Q_RQOMR_RFA_WIDTH 3
+#define MTL_Q_RQOMR_RFD_INDEX 13
+#define MTL_Q_RQOMR_RFD_WIDTH 3
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
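+
+/* Illustrative only: since MTL_TC_BASE aliases MTL_Q_BASE, traffic
+ * class n's MTL_TC_ETSCR is at 0x1100 + (n * 0x80) + 0x10.
+ */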
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+
+/* PCS MMD select register offset
+ * The MMD select register is used for accessing PCS registers
+ * when the underlying APB3 interface is using indirect addressing.
+ * Indirect addressing requires accessing registers in two phases,
+ * an address phase and a data phase. The address phase requires
+ * writing an address selection value to the MMD select register.
+ */
+#define PCS_MMD_SELECT 0xff
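+
+/* Illustrative sketch of the two-phase access described above (the
+ * helper names are hypothetical; the real accessors live elsewhere in
+ * the driver):
+ *
+ *	pcs_write(priv, PCS_MMD_SELECT, mmd);	address phase
+ *	val = pcs_read(priv, reg);		data phase
+ */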
+
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
+ (_var) |= cpu_to_le32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
+
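+/* Worked example (illustrative only): with _index = 4 and _width = 3,
+ * GET_BITS(var, 4, 3) evaluates to ((var >> 4) & 0x7), while
+ * SET_BITS(var, 4, 3, val) first clears bits 6:4 of var and then ORs
+ * in the low three bits of val shifted to bit position 4.
+ */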
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define XGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
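+/* Worked example (illustrative only):
+ * XGMAC_GET_BITS(attrs, TX_PACKET_ATTRIBUTES, TSO_ENABLE) pastes the
+ * tokens into GET_BITS(attrs, TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX,
+ * TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH), i.e. it extracts bit 1 of
+ * the attributes word.
+ */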
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XGMAC_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
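+/* Worked example (illustrative only):
+ * XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3) performs a
+ * read-modify-write of MAC_TCR, updating only the field described by
+ * the MAC_TCR_SS_INDEX/MAC_TCR_SS_WIDTH definitions and leaving all
+ * other bits of the register unchanged.
+ */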
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ ioread32((_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
+
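+/* Worked example (illustrative only): for queue 2,
+ * XGMAC_MTL_IOREAD(pdata, 2, MTL_Q_TQOMR) reads from the mmio offset
+ * MTL_Q_BASE + (2 * MTL_Q_INC) + MTL_Q_TQOMR, i.e. the third queue's
+ * copy of the Tx queue operation mode register.
+ */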
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define XGMAC_DMA_IOREAD(_channel, _reg) \
+ ioread32((_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ iowrite32((_val), (_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations; everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ mmd_val &= ~_mask; \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
+} while (0)
+
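+/* Worked example (illustrative only):
+ * XMDIO_READ_BITS(pdata, MDIO_MMD_AN, MDIO_STAT1, MDIO_STAT1_LSTATUS)
+ * returns the masked (unshifted) link status bit, and
+ * XMDIO_WRITE_BITS clears the mask in the current value before OR-ing
+ * in _val, so _val must already be positioned within the mask.
+ */
+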
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000000..6bb76d5c817b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,375 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static ssize_t xgbe_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char workarea[32];
+ ssize_t len;
+ unsigned int scan_value;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ workarea[len] = '\0';
+ if (sscanf(workarea, "%x", &scan_value) == 1)
+ *value = scan_value;
+ else
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xgmac_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xgmac_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_addr_read,
+ .write = xgmac_reg_addr_write,
+};
+
+static const struct file_operations xgmac_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_value_read,
+ .write = xgmac_reg_value_write,
+};
+
+static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xpcs_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xpcs_mmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_mmd_read,
+ .write = xpcs_mmd_write,
+};
+
+static const struct file_operations xpcs_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_addr_read,
+ .write = xpcs_reg_addr_write,
+};
+
+static const struct file_operations xpcs_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_value_read,
+ .write = xpcs_reg_value_write,
+};
+
+void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+{
+ struct dentry *pfile;
+ char *buf;
+
+ /* Set defaults */
+ pdata->debugfs_xgmac_reg = 0;
+ pdata->debugfs_xpcs_mmd = 1;
+ pdata->debugfs_xpcs_reg = 0;
+
+	buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+	if (!buf)
+		return;
+
+	pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+ if (pdata->xgbe_debugfs == NULL) {
+ netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("xgmac_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xgmac_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_mmd", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ kfree(buf);
+}
+
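+/* Example usage from user space (illustrative; the directory name
+ * depends on the interface name and on where debugfs is mounted):
+ *   echo 0x0070 > /sys/kernel/debug/amd-xgbe-eth0/xgmac_register
+ *   cat /sys/kernel/debug/amd-xgbe-eth0/xgmac_register_value
+ * The first command selects an XGMAC register offset, the second
+ * reads that register's current value.
+ */
+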
+void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
+{
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000000..6f1c85956d50
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,556 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+
+static void xgbe_free_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_ring_data *rdata;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->rdata) {
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ xgbe_unmap_skb(pdata, rdata);
+ }
+
+ kfree(ring->rdata);
+ ring->rdata = NULL;
+ }
+
+ if (ring->rdesc) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ ring->rdesc_count),
+ ring->rdesc, ring->rdesc_dma);
+ ring->rdesc = NULL;
+ }
+}
+
+static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+
+ DBGPR("<--xgbe_free_ring_resources\n");
+}
+
+static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+{
+ DBGPR("-->xgbe_init_ring\n");
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->rdesc_count = rdesc_count;
+ ring->rdesc = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ rdesc_count), &ring->rdesc_dma,
+ GFP_KERNEL);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+ ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
+ GFP_KERNEL);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
+ ring->rdesc, ring->rdesc_dma, ring->rdata);
+
+ DBGPR("<--xgbe_init_ring\n");
+
+ return 0;
+}
+
+static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+ int ret;
+
+ DBGPR("-->xgbe_alloc_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ DBGPR(" %s - tx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring\n");
+ goto err_ring;
+ }
+
+ DBGPR(" %s - rx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+				     "error initializing Rx ring\n");
+ goto err_ring;
+ }
+ }
+
+ DBGPR("<--xgbe_alloc_ring_resources\n");
+
+ return 0;
+
+err_ring:
+ xgbe_free_ring_resources(pdata);
+
+ return ret;
+}
+
+static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ ring->tx.queue_stopped = 0;
+
+ hw_if->tx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
+}
+
+static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ dma_addr_t rdesc_dma, skb_dma;
+ struct sk_buff *skb = NULL;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ /* Allocate skb & assign to each rdesc */
+ skb = dev_alloc_skb(pdata->rx_buf_size);
+ if (skb == NULL)
+ break;
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pdata->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "failed to do the dma map\n");
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rdata->skb = skb;
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = pdata->rx_buf_size;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ ring->rx.realloc_index = 0;
+ ring->rx.realloc_threshold = 0;
+
+ hw_if->rx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
+}
+
+static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
+{
+ if (rdata->skb_dma) {
+ if (rdata->mapped_as_page) {
+ dma_unmap_page(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ }
+ rdata->skb_dma = 0;
+ rdata->skb_dma_len = 0;
+ }
+
+ if (rdata->skb) {
+ dev_kfree_skb_any(rdata->skb);
+ rdata->skb = NULL;
+ }
+
+ rdata->tso_header = 0;
+ rdata->len = 0;
+ rdata->interrupt = 0;
+ rdata->mapped_as_page = 0;
+}
+
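+/* Map an skb for transmit. A sketch of the resulting descriptor
+ * layout: one slot is reserved for a context descriptor when the MSS
+ * or VLAN tag changes, the TSO header (if any) gets its own mapping,
+ * and the linear data and each page fragment are split into mappings
+ * of at most TX_MAX_BUF_SIZE bytes, one descriptor entry each.
+ */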
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct skb_frag_struct *frag;
+ dma_addr_t skb_dma;
+ unsigned int start_index, cur_index;
+ unsigned int offset, tso, vlan, datalen, len;
+ unsigned int i;
+
+ DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ packet = &ring->packet_data;
+ packet->rdesc_count = 0;
+ packet->length = 0;
+
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (packet->mss != ring->tx.cur_mss)) ||
+ (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ DBGPR(" TSO packet\n");
+
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ packet->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = packet->header_len;
+ rdata->tso_header = 1;
+
+ offset = packet->header_len;
+
+ packet->length += packet->header_len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ DBGPR(" mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ rdata->mapped_as_page = 1;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry */
+ rdata->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ packet->rdesc_count = cur_index - start_index;
+
+ DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
+
+ return packet->rdesc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ rdata = GET_DESC_DATA(ring, start_index++);
+ xgbe_unmap_skb(pdata, rdata);
+ }
+
+ DBGPR("<--xgbe_map_tx_skb: count=0\n");
+
+ return 0;
+}
+
+static void xgbe_realloc_skb(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct sk_buff *skb = NULL;
+ dma_addr_t skb_dma;
+ int i;
+
+ DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+ ring->rx.realloc_index);
+
+ for (i = 0; i < ring->dirty; i++) {
+ rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+
+ /* Reset rdata values */
+ xgbe_unmap_skb(pdata, rdata);
+
+ /* Allocate skb & assign to each rdesc */
+ skb = dev_alloc_skb(pdata->rx_buf_size);
+ if (skb == NULL) {
+ netdev_alert(pdata->netdev,
+ "failed to allocate skb\n");
+ break;
+ }
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pdata->rx_buf_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "failed to do the dma map\n");
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rdata->skb = skb;
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = pdata->rx_buf_size;
+
+ hw_if->rx_desc_reset(rdata);
+
+ ring->rx.realloc_index++;
+ }
+ ring->dirty = 0;
+
+ DBGPR("<--xgbe_realloc_skb\n");
+}
+
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs_desc\n");
+
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+ desc_if->realloc_skb = xgbe_realloc_skb;
+ desc_if->unmap_skb = xgbe_unmap_skb;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+
+ DBGPR("<--xgbe_init_function_ptrs_desc\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000000..002293b0819d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2182 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+ rate = clk_get_rate(pdata->sysclock);
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
+ */
+ ret = (usec * (rate / 1000000)) / 256;
+
+ DBGPR("<--xgbe_usec_to_riwt\n");
+
+ return ret;
+}
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+ rate = clk_get_rate(pdata->sysclock);
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ DBGPR("<--xgbe_riwt_to_usec\n");
+
+ return ret;
+}
+
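+/* Worked example (illustrative only): with a 125 MHz system clock,
+ * rate / 1000000 = 125, so one watchdog timer unit covers
+ * 256 / 125 ~= 2 usec, and a 30 usec coalescing time converts to a
+ * RIWT value of (30 * 125) / 256 = 14.
+ */
+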
+static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+
+ return 0;
+}
+
+static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
+}
+
+static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
+}
+
+static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+ return 0;
+}
+
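+/* Nothing to program here: Tx interrupt coalescing is presumably
+ * realized per descriptor through the TX_NORMAL_DESC2 IC (interrupt
+ * on completion) bit rather than through a dedicated register.
+ */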
+static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ return 0;
+}
+
+static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+}
+
+static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ }
+}
+
+static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+
+ /* Set MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause)
+ xgbe_enable_tx_flow_control(pdata);
+ else
+ xgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ if (pdata->rx_pause)
+ xgbe_enable_rx_flow_control(pdata);
+ else
+ xgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+{
+ xgbe_config_tx_flow_control(pdata);
+ xgbe_config_rx_flow_control(pdata);
+}
+
+static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless polling)
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+			/* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
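+/* The status bits are assumed to be write-one-to-clear, so writing
+ * the just-read MTL_Q_ISR value back acknowledges any interrupt
+ * status that was pending at read time.
+ */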
+static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+ }
+}
+
+static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+{
+ /* No MAC interrupts to be enabled */
+ XGMAC_IOWRITE(pdata, MAC_IER, 0);
+
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+}
+
+static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+
+ return 0;
+}
+
+static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+
+ return 0;
+}
+
+static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
+ return 0;
+
+ DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+
+ return 0;
+}
+
+static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
+ return 0;
+
+ DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
+
+ return 0;
+}
+
+static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
+ unsigned int am_mode)
+{
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+ unsigned int i;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
+
+ i = 0;
+ mac_reg = MAC_MACA1HR;
+
+ netdev_for_each_uc_addr(ha, pdata->netdev) {
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding unicast address %pM at 0x%04x\n",
+ ha->addr, mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
+ mac_reg += MAC_MACA_INC;
+
+ i++;
+ }
+
+ if (!am_mode) {
+ netdev_for_each_mc_addr(ha, pdata->netdev) {
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding multicast address %pM at 0x%04x\n",
+ ha->addr, mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
+ mac_reg += MAC_MACA_INC;
+
+ i++;
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ for (; i < pdata->hw_feat.addn_mac; i++) {
+ XGMAC_IOWRITE(pdata, mac_reg, 0);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, 0);
+ mac_reg += MAC_MACA_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
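+/* Worked example (illustrative only): for the address
+ * 00:11:22:33:44:55, xgbe_set_mac_address above writes
+ * mac_addr_lo = 0x33221100 and mac_addr_hi = 0x00005544, the byte
+ * order the MACA0LR/MACA0HR register pair expects.
+ */
+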
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+	 * register sets. This requires accessing the PCS registers in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
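+
+/* Worked example of the indirect access above, assuming the PCS MMD
+ * (device 3) and an illustrative register offset of 0x0005:
+ *
+ * mmd_address = (3 << 16) | 0x0005 = 0x30005
+ *
+ * address phase: XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, 0x300)
+ * data phase: XPCS_IOREAD(pdata, 0x05 << 2), i.e. mmio offset 0x14
+ *
+ * The select register picks a 256-register window and the low byte of
+ * the MMD address indexes into it, left shifted 2 for the 32-bit bus.
+ */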
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing the PCS registers in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and accessing 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+}
+
+static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Clear the desc2 control bits (IC, TTSE, B2L & B1L)
+ * Clear the desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+}
+
+static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ int i;
+ int start_index = ring->cur;
+
+ DBGPR("-->tx_desc_init\n");
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ /* Initialize Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Clear the desc2 control bits (IC, TTSE, B2L & B1L)
+ * Clear the desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC,
+ * etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+ }
+
+ /* Make sure everything is written to the descriptor(s) before
+ * telling the device about them
+ */
+ wmb();
+
+ /* Update the total number of Tx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--tx_desc_init\n");
+}
+
+static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to dma address (lo)
+ * Set buffer 1 (hi) address to dma address (hi)
+ * Set buffer 2 (lo) address to zero
+ * Set buffer 2 (hi) address to zero and set control bits
+ * OWN and INTE
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc2 = 0;
+
+ rdesc->desc3 = 0;
+ if (rdata->interrupt)
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+}
+
+static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ unsigned int start_index = ring->cur;
+ unsigned int rx_coalesce, rx_frames;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+ rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
+ rx_frames = pdata->rx_frames;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ /* Initialize Rx descriptor
+ * Set buffer 1 (lo) address to dma address (lo)
+ * Set buffer 1 (hi) address to dma address (hi)
+ * Set buffer 2 (lo) address to zero
+ * Set buffer 2 (hi) address to zero and set control
+ * bits OWN and INTE appropriately
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ rdata->interrupt = 1;
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
+ /* Clear interrupt on completion bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ 0);
+ rdata->interrupt = 0;
+ }
+ }
+
+ /* Make sure everything is written to the descriptors before
+ * telling the device about them
+ */
+ wmb();
+
+ /* Update the total number of Rx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--rx_desc_init\n");
+}
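+
+/* Coalescing sketch for the INTE handling above: with rx_frames = 4,
+ * the "(i + 1) % rx_frames" test clears INTE on all but every fourth
+ * descriptor (i = 3, 7, 11, ...), so the hardware raises at most one
+ * Rx interrupt per four received frames; the in-between descriptors
+ * complete silently and are reaped by the same NAPI poll.
+ */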
+
+static void xgbe_pre_xmit(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int csum, tso, vlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_coalesce, tx_frames;
+ int start_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_pre_xmit\n");
+
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
+ tx_frames = pdata->tx_frames;
+ if (tx_coalesce && !channel->tx_timer_active)
+ ring->coalesce_count = 0;
+
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if the MSS or VLAN tag changed */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ DBGPR(" TSO context descriptor, mss=%u\n",
+ packet->mss);
+
+ /* Set the MSS size */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, packet->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ TCMSSV, 1);
+
+ ring->tx.cur_mss = packet->mss;
+ }
+
+ if (vlan_context) {
+ DBGPR(" VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Set the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, packet->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Set IC bit based on Tx coalescing settings */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+ if (tx_coalesce && (!tx_frames ||
+ (++ring->coalesce_count % tx_frames)))
+ /* Clear IC bit */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (ring->cur != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ packet->tcp_payload_len);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
+ }
+
+ for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* Set IC bit based on Tx coalescing settings */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+ if (tx_coalesce && (!tx_frames ||
+ (++ring->coalesce_count % tx_frames)))
+ /* Clear IC bit */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = GET_DESC_DATA(ring, start_index);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
+#endif
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing the address
+ * of the next free descriptor
+ */
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (tx_coalesce && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ DBGPR(" %s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->rdesc_count - 1),
+ (ring->cur - 1) & (ring->rdesc_count - 1));
+
+ DBGPR("<--xgbe_pre_xmit\n");
+}
+
+static int xgbe_dev_read(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int err, etlt;
+
+ DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Check for data availability */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ return 1;
+
+#ifdef XGMAC_ENABLE_RX_DESC_DUMP
+ xgbe_dump_rx_desc(ring, rdesc, ring->cur);
+#endif
+
+ /* Get the packet length */
+ rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
+ /* Not all the data has been transferred for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+ etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || (err && !etlt)) {
+ if (etlt == 0x09) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0,
+ OVT);
+ DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ }
+ } else {
+ if ((etlt == 0x05) || (etlt == 0x06))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ else
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+ }
+
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+}
+
+static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+}
+
+static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
+ enum xgbe_int_state int_state)
+{
+ unsigned int dma_ch_ier;
+
+ if (int_state == XGMAC_INT_STATE_SAVE) {
+ channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ channel->saved_ier &= DMA_INTERRUPT_MASK;
+ } else {
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ dma_ch_ier |= channel->saved_ier;
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ switch (int_id) {
+ case XGMAC_INT_DMA_ISR_DC0IS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_ISR_DC0IS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ dma_ch_ier &= ~DMA_INTERRUPT_MASK;
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ unsigned int count = 2000;
+
+ DBGPR("-->xgbe_exit\n");
+
+ /* Issue a software reset */
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ usleep_range(10, 15);
+
+ /* Poll until the software reset completes */
+ while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ DBGPR("<--xgbe_exit\n");
+
+ return 0;
+}
+
+static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, count;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+ /* Poll until the flush of each Tx queue completes */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+ count = 2000;
+ while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+{
+ /* Set enhanced addressing mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+}
+
+static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ unsigned int arcache, awcache;
+
+ arcache = 0;
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+}
+
+static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to strict priority algorithm */
+ for (i = 0; i < XGBE_TC_CNT; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
+
+ /* Set Rx to strict priority algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
+ unsigned char queue_count)
+{
+ unsigned int q_fifo_size = 0;
+ enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ /* Calculate Tx/Rx fifo share per queue */
+ switch (fifo_size) {
+ case 0:
+ q_fifo_size = FIFO_SIZE_B(128);
+ break;
+ case 1:
+ q_fifo_size = FIFO_SIZE_B(256);
+ break;
+ case 2:
+ q_fifo_size = FIFO_SIZE_B(512);
+ break;
+ case 3:
+ q_fifo_size = FIFO_SIZE_KB(1);
+ break;
+ case 4:
+ q_fifo_size = FIFO_SIZE_KB(2);
+ break;
+ case 5:
+ q_fifo_size = FIFO_SIZE_KB(4);
+ break;
+ case 6:
+ q_fifo_size = FIFO_SIZE_KB(8);
+ break;
+ case 7:
+ q_fifo_size = FIFO_SIZE_KB(16);
+ break;
+ case 8:
+ q_fifo_size = FIFO_SIZE_KB(32);
+ break;
+ case 9:
+ q_fifo_size = FIFO_SIZE_KB(64);
+ break;
+ case 10:
+ q_fifo_size = FIFO_SIZE_KB(128);
+ break;
+ case 11:
+ q_fifo_size = FIFO_SIZE_KB(256);
+ break;
+ }
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Set the queue fifo size programmable value */
+ if (q_fifo_size >= FIFO_SIZE_KB(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(128))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(64))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(32))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(16))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(8))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(4))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(2))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(1))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
+ else if (q_fifo_size >= FIFO_SIZE_B(512))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_512;
+ else if (q_fifo_size >= FIFO_SIZE_B(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ return p_fifo;
+}
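+
+/* Sizing sketch (illustrative values): a hardware fifo_size encoding
+ * of 7 selects 16KB of total fifo, so with four queues
+ *
+ * q_fifo_size = FIFO_SIZE_KB(16) / 4 = 4096 bytes
+ * p_fifo = XGMAC_MTL_FIFO_SIZE_4K
+ *
+ * Any remainder below the next discrete step is simply left unused.
+ */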
+
+static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+ pdata->hw_feat.tx_q_cnt);
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+ pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+ pdata->hw_feat.rx_q_cnt);
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+ pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, reg, reg_val;
+ unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+}
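+
+/* Packing sketch, assuming MTL_RQDCM_Q_PER_REG is 4 (one byte-wide
+ * field per queue in each RQDCM register): with four Rx queues the
+ * loop above accumulates
+ *
+ * reg_val = 0x80 | (0x80 << 8) | (0x80 << 16) | (0x80 << 24)
+ * = 0x80808080
+ *
+ * and issues a single write to MTL_RQDCM0R; 0x80 sets the per-queue
+ * dynamic DMA channel selection bit.
+ */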
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+ /* Activate flow control when less than 4k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+
+ /* De-activate flow control when more than 6k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ }
+}
+
+static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+}
+
+static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xgbe_enable_rx_csum(pdata);
+ else
+ xgbe_disable_rx_csum(pdata);
+}
+
+static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xgbe_enable_rx_vlan_stripping(pdata);
+ else
+ xgbe_disable_rx_vlan_stripping(pdata);
+}
+
+static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
+ stats->txoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
+ stats->txframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
+ stats->txmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
+ stats->tx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
+ stats->tx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
+ stats->tx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
+ stats->tx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
+ stats->tx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
+ stats->tx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
+ stats->txunicastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
+ stats->txmulticastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
+ stats->txunderflowerror +=
+ XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
+ stats->txoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
+ stats->txframecount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
+ stats->txpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
+ stats->txvlanframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
+ stats->rxframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
+ stats->rxoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
+ stats->rxoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
+ stats->rxbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
+ stats->rxmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
+ stats->rxcrcerror +=
+ XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
+ stats->rxrunterror +=
+ XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
+ stats->rxjabbererror +=
+ XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
+ stats->rxundersize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
+ stats->rxoversize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
+ stats->rx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
+ stats->rx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
+ stats->rx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
+ stats->rx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
+ stats->rx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
+ stats->rx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
+ stats->rxunicastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
+ stats->rxlengtherror +=
+ XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
+ stats->rxoutofrangetype +=
+ XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
+ stats->rxpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
+ stats->rxfifooverflow +=
+ XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
+ stats->rxvlanframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
+ stats->rxwatchdogerror +=
+ XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+ /* Freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
+
+ stats->txoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
+}
+
+static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+{
+ /* Set counters to reset on read */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
+
+ /* Reset the counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+}
+
+static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
+ reg_val = 0;
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ reg_val |= (0x02 << (i << 1));
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
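+
+/* Enable sketch: MAC_RQC0R carries a 2-bit enable field per Rx queue
+ * and 0x2 means enabled for DCB/generic traffic. With two queues the
+ * loop above builds
+ *
+ * reg_val = 0x2 | (0x2 << 2) = 0xa
+ *
+ * so every queue is enabled with one register write.
+ */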
+
+static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Disable each Rx queue */
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+}
+
+static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static int xgbe_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_init\n");
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialize DMA related features
+ */
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+ xgbe_config_pblx8(pdata);
+ xgbe_config_tx_pbl_val(pdata);
+ xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+
+ /*
+ * Initialize MTL related features
+ */
+ xgbe_config_mtl_mode(pdata);
+ xgbe_config_rx_queue_mapping(pdata);
+ /* TODO: Program the priorities mapped to the selected traffic
+ * classes in the MTL_TC_Prty_Map0-3 registers
+ */
+ xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control_threshold(pdata);
+ /* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
+ /* TODO: Error packet and undersized good packet forwarding
+ * enable (FEP and FUP)
+ */
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /* Transmit Class Weight */
+ XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
+
+ /*
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs\n");
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+ hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
+ hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
+ hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+ hw_if->set_mac_address = xgbe_set_mac_address;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+
+ hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+ hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+ hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+ hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+ hw_if->enable_rx = xgbe_enable_rx;
+ hw_if->disable_rx = xgbe_disable_rx;
+
+ hw_if->powerup_tx = xgbe_powerup_tx;
+ hw_if->powerdown_tx = xgbe_powerdown_tx;
+ hw_if->powerup_rx = xgbe_powerup_rx;
+ hw_if->powerdown_rx = xgbe_powerdown_rx;
+
+ hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_read = xgbe_dev_read;
+ hw_if->enable_int = xgbe_enable_int;
+ hw_if->disable_int = xgbe_disable_int;
+ hw_if->init = xgbe_init;
+ hw_if->exit = xgbe_exit;
+
+ /* Descriptor-related sequences are initialized here */
+ hw_if->tx_desc_init = xgbe_tx_desc_init;
+ hw_if->rx_desc_init = xgbe_rx_desc_init;
+ hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+ hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+ hw_if->is_last_desc = xgbe_is_last_desc;
+ hw_if->is_context_desc = xgbe_is_context_desc;
+
+ /* For flow control */
+ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+
+ /* For Rx and Tx interrupt coalescing */
+ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+ hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+ hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+ hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+ hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+ hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+ hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+ hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+ hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+ hw_if->config_pblx8 = xgbe_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+ hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+
+ DBGPR("<--xgbe_init_function_ptrs\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000000..cfe3d93b5f52
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,1351 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <net/busy_poll.h>
+#include <linux/clk.h>
+#include <linux/if_ether.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_poll(struct napi_struct *, int);
+static void xgbe_set_rx_mode(struct net_device *);
+
+static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+{
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+}
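+
+/* Accounting sketch: cur and dirty are free-running unsigned
+ * counters, so (cur - dirty) yields the number of in-flight
+ * descriptors even after wraparound. E.g. cur = 515, dirty = 512
+ * and rdesc_count = 512 gives 512 - 3 = 509 available descriptors.
+ */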
+
+static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (rx_buf_size < RX_MIN_BUF_SIZE)
+ rx_buf_size = RX_MIN_BUF_SIZE;
+ rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
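+
+/* Sizing sketch (RX_BUF_ALIGN is assumed to be a power of two,
+ * e.g. 64): with a standard 1500-byte MTU,
+ *
+ * rx_buf_size = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
+ * = 1522, rounded up to 1536
+ *
+ * The (x + a - 1) & ~(a - 1) round-up only works because the
+ * alignment is a power of two.
+ */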
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring)
+ hw_if->enable_int(channel,
+ XGMAC_INT_DMA_CH_SR_TI);
+ if (channel->rx_ring)
+ hw_if->enable_int(channel,
+ XGMAC_INT_DMA_CH_SR_RI);
+ }
+}
+
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring)
+ hw_if->disable_int(channel,
+ XGMAC_INT_DMA_CH_SR_TI);
+ if (channel->rx_ring)
+ hw_if->disable_int(channel,
+ XGMAC_INT_DMA_CH_SR_RI);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+ unsigned int mac_isr;
+ unsigned int i;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts, so for polling mode we only need to check this
+ * register for a non-zero value
+ */
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ if (!dma_isr)
+ goto isr_done;
+
+ DBGPR("-->xgbe_isr\n");
+
+ DBGPR(" DMA_ISR = %08x\n", dma_isr);
+ DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
+ DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel + i;
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+ }
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+ /* Clear all interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+ }
+
+ DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
+
+ DBGPR("<--xgbe_isr\n");
+
+isr_done:
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+{
+ struct xgbe_channel *channel = container_of(timer,
+ struct xgbe_channel,
+ tx_timer);
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+
+ channel->tx_timer_active = 0;
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ DBGPR("<--xgbe_tx_timer\n");
+
+ return HRTIMER_NORESTART;
+}
+
+static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_init_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s adding tx timer\n", channel->name);
+ hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ channel->tx_timer.function = xgbe_tx_timer;
+ }
+
+ DBGPR("<--xgbe_init_tx_timers\n");
+}
+
+static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s deleting tx timer\n", channel->name);
+ channel->tx_timer_active = 0;
+ hrtimer_cancel(&channel->tx_timer);
+ }
+
+ DBGPR("<--xgbe_stop_tx_timers\n");
+}
+
+void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ DBGPR("-->xgbe_get_all_hw_features\n");
+
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+
+ /* The Queue and Channel counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+
+ DBGPR("<--xgbe_get_all_hw_features\n");
+}
+
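+/* The 'add' argument is set on the open path, where the NAPI instance
+ * must first be registered with the stack; powerup passes 0 since the
+ * instance is never deleted on powerdown.
+ */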
+static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+{
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&pdata->napi);
+}
+
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+{
+ napi_disable(&pdata->napi);
+}
+
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_tx_coalesce\n");
+
+ pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
+
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_tx_coalesce\n");
+}
+
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_rx_coalesce\n");
+}
+
+static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_skbuff\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+ desc_if->unmap_skb(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_tx_skbuff\n");
+}
+
+static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_skbuff\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+ desc_if->unmap_skb(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_rx_skbuff\n");
+}
+
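+/* The 'caller' argument identifies whether a power state change is
+ * requested from within the driver (suspend/resume, which also detach
+ * and attach the net device) or on behalf of user space.
+ */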
+int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered down\n");
+ DBGPR("<--xgbe_powerdown\n");
+ return -EINVAL;
+ }
+
+ phy_stop(pdata->phydev);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata);
+
+ /* Powerdown Tx/Rx */
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+}
+
+int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered up\n");
+ DBGPR("<--xgbe_powerup\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ pdata->power_down = 0;
+
+ phy_start(pdata->phydev);
+
+ /* Enable Tx/Rx */
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+ xgbe_napi_enable(pdata, 0);
+ netif_tx_start_all_queues(netdev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+}
+
+static int xgbe_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_start\n");
+
+ xgbe_set_rx_mode(netdev);
+
+ hw_if->init(pdata);
+
+ phy_start(pdata->phydev);
+
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ xgbe_init_tx_timers(pdata);
+
+ xgbe_napi_enable(pdata, 1);
+ netif_tx_start_all_queues(netdev);
+
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
+}
+
+static void xgbe_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_stop\n");
+
+ phy_stop(pdata->phydev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata);
+
+ xgbe_stop_tx_timers(pdata);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+ DBGPR("<--xgbe_stop\n");
+}
+
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_restart_dev\n");
+
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
+ synchronize_irq(pdata->irq_number);
+
+ xgbe_free_tx_skbuff(pdata);
+ xgbe_free_rx_skbuff(pdata);
+
+ /* Issue software reset to device if requested */
+ if (reset)
+ hw_if->exit(pdata);
+
+ xgbe_start(pdata);
+
+ DBGPR("<--xgbe_restart_dev\n");
+}
+
+static void xgbe_restart(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ restart_work);
+
+ rtnl_lock();
+
+ xgbe_restart_dev(pdata, 1);
+
+ rtnl_unlock();
+}
+
+static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ if (vlan_tx_tag_present(skb))
+ packet->vlan_ctag = vlan_tx_tag_get(skb);
+}
+
+static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ int ret;
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ packet->tcp_header_len = tcp_hdrlen(skb);
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+ DBGPR(" packet->mss=%u\n", packet->mss);
+
+ return 0;
+}
+
+static int xgbe_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ DBGPR(" TSO packet to be processed\n");
+
+ return 1;
+}
+
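+/* Count the descriptors an skb will consume: an optional context
+ * descriptor when the MSS or VLAN tag differs from the ring's current
+ * value, an extra descriptor for the TSO header, and one descriptor per
+ * TX_MAX_BUF_SIZE chunk of the linear data and of each fragment.
+ */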
+static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ context_desc = 0;
+ packet->rdesc_count = 0;
+
+ if (xgbe_is_tso(skb)) {
+		/* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+		/* TSO requires an extra descriptor for the TSO header */
+ packet->rdesc_count++;
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE, 1);
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+
+ if (vlan_tx_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag) {
+			/* We can share with the TSO context descriptor */
+			if (!context_desc) {
+				context_desc = 1;
+				packet->rdesc_count++;
+			}
+		}
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ }
+
+ for (len = skb_headlen(skb); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+		for (len = skb_frag_size(frag); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xgbe_open(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_open\n");
+
+ /* Enable the clock */
+ ret = clk_prepare_enable(pdata->sysclock);
+ if (ret) {
+ netdev_alert(netdev, "clk_prepare_enable failed\n");
+ return ret;
+ }
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ goto err_clk;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_clk;
+
+ /* Initialize the device restart work struct */
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+
+ /* Request interrupts */
+ ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+		netdev_alert(netdev, "error requesting irq %d\n",
+			     netdev->irq);
+ goto err_irq;
+ }
+ pdata->irq_number = netdev->irq;
+
+ ret = xgbe_start(pdata);
+ if (ret)
+ goto err_start;
+
+ DBGPR("<--xgbe_open\n");
+
+ return 0;
+
+err_start:
+ hw_if->exit(pdata);
+
+ devm_free_irq(pdata->dev, pdata->irq_number, pdata);
+ pdata->irq_number = 0;
+
+err_irq:
+ desc_if->free_ring_resources(pdata);
+
+err_clk:
+ clk_disable_unprepare(pdata->sysclock);
+
+ return ret;
+}
+
+static int xgbe_close(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+ DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Free all the ring data */
+ desc_if->free_ring_resources(pdata);
+
+ /* Release the interrupt */
+ if (pdata->irq_number != 0) {
+ devm_free_irq(pdata->dev, pdata->irq_number, pdata);
+ pdata->irq_number = 0;
+ }
+
+ /* Disable the clock */
+ clk_disable_unprepare(pdata->sysclock);
+
+ DBGPR("<--xgbe_close\n");
+
+ return 0;
+}
+
+static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ unsigned long flags;
+ int ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+ channel = pdata->channel + skb->queue_mapping;
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+
+ ret = NETDEV_TX_OK;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (skb->len == 0) {
+ netdev_err(netdev, "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Calculate preliminary packet info */
+ memset(packet, 0, sizeof(*packet));
+ xgbe_packet_info(ring, skb, packet);
+
+ /* Check that there are enough descriptors available */
+ if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+ ret = NETDEV_TX_BUSY;
+ goto tx_netdev_return;
+ }
+
+ ret = xgbe_prep_tso(skb, packet);
+ if (ret) {
+ netdev_err(netdev, "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+ xgbe_prep_vlan(skb, packet);
+
+ if (!desc_if->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Configure required descriptor fields for transmission */
+ hw_if->pre_xmit(channel);
+
+#ifdef XGMAC_ENABLE_TX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, true);
+#endif
+
+tx_netdev_return:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ DBGPR("<--xgbe_xmit\n");
+
+ return ret;
+}
+
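+/* Program the Rx filters.  When the unicast and/or multicast address
+ * counts exceed the additional MAC address registers the hardware
+ * provides (hw_feat.addn_mac), fall back to promiscuous or
+ * all-multicast mode.
+ */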
+static void xgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int pr_mode, am_mode;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
+ pr_mode = 1;
+ if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
+ am_mode = 1;
+ if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
+ pdata->hw_feat.addn_mac)
+ pr_mode = 1;
+
+ hw_if->set_promiscuous_mode(pdata, pr_mode);
+ hw_if->set_all_multicast_mode(pdata, am_mode);
+ if (!pr_mode)
+ hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+}
+
+static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct sockaddr *saddr = addr;
+
+ DBGPR("-->xgbe_set_mac_address\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_if->set_mac_address(pdata, netdev->dev_addr);
+
+ DBGPR("<--xgbe_set_mac_address\n");
+
+ return 0;
+}
+
+static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_change_mtu\n");
+
+ ret = xgbe_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xgbe_restart_dev(pdata, 0);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+
+ return s;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xgbe_poll_controller(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_poll_controller\n");
+
+ disable_irq(pdata->irq_number);
+
+ xgbe_isr(pdata->irq_number, pdata);
+
+ enable_irq(pdata->irq_number);
+
+ DBGPR("<--xgbe_poll_controller\n");
+}
+#endif /* End CONFIG_NET_POLL_CONTROLLER */
+
+static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rxcsum_enabled, rxvlan_enabled;
+
+ rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
+ rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+ hw_if->enable_rx_csum(pdata);
+ netdev_alert(netdev, "state change - rxcsum enabled\n");
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+ hw_if->disable_rx_csum(pdata);
+ netdev_alert(netdev, "state change - rxcsum disabled\n");
+ }
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+ hw_if->enable_rx_vlan_stripping(pdata);
+ netdev_alert(netdev, "state change - rxvlan enabled\n");
+ } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+ hw_if->disable_rx_vlan_stripping(pdata);
+ netdev_alert(netdev, "state change - rxvlan disabled\n");
+ }
+
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+
+ return 0;
+}
+
+static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+ .ndo_start_xmit = xgbe_xmit,
+ .ndo_set_rx_mode = xgbe_set_rx_mode,
+ .ndo_set_mac_address = xgbe_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = xgbe_change_mtu,
+ .ndo_get_stats64 = xgbe_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgbe_poll_controller,
+#endif
+ .ndo_set_features = xgbe_set_features,
+};
+
+struct net_device_ops *xgbe_get_netdev_ops(void)
+{
+ return (struct net_device_ops *)&xgbe_netdev_ops;
+}
+
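+/* Reclaim completed Tx descriptors, limited to TX_DESC_MAX_PROC per
+ * call, and restart a stopped Tx queue once more than TX_DESC_MIN_FREE
+ * descriptors are free again.
+ */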
+static int xgbe_tx_poll(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ unsigned long flags;
+ int processed = 0;
+
+ DBGPR("-->xgbe_tx_poll\n");
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
+ rdata = GET_DESC_DATA(ring, ring->dirty);
+ rdesc = rdata->rdesc;
+
+ if (!hw_if->tx_complete(rdesc))
+ break;
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
+#endif
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_if->unmap_skb(pdata, rdata);
+ hw_if->tx_desc_reset(rdata);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_wake_subqueue(netdev, channel->queue_index);
+ }
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return processed;
+}
+
+static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ unsigned int incomplete, error;
+ unsigned int cur_len, put_len, max_len;
+ int received = 0;
+
+ DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ packet = &ring->packet_data;
+ while (received < budget) {
+ DBGPR(" cur = %d\n", ring->cur);
+
+ /* Clear the packet data information */
+ memset(packet, 0, sizeof(*packet));
+ skb = NULL;
+ error = 0;
+ cur_len = 0;
+
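+		/* A packet can span multiple descriptors; when the
+		 * INCOMPLETE attribute is set we loop back here to gather
+		 * the remaining segments into the same skb.
+		 */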
+read_again:
+ rdata = GET_DESC_DATA(ring, ring->cur);
+
+ if (hw_if->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+ ring->dirty++;
+
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_FROM_DEVICE);
+ rdata->skb_dma = 0;
+
+ incomplete = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ INCOMPLETE);
+
+ /* Earlier error, just drain the remaining data */
+ if (incomplete && error)
+ goto read_again;
+
+ if (error || packet->errors) {
+ if (packet->errors)
+ DBGPR("Error in received packet\n");
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ put_len = rdata->len - cur_len;
+ if (skb) {
+ if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
+ DBGPR("pskb_expand_head error\n");
+ if (incomplete) {
+ error = 1;
+ goto read_again;
+ }
+
+ dev_kfree_skb(skb);
+ continue;
+ }
+ memcpy(skb_tail_pointer(skb), rdata->skb->data,
+ put_len);
+ } else {
+ skb = rdata->skb;
+ rdata->skb = NULL;
+ }
+ skb_put(skb, put_len);
+ cur_len += put_len;
+
+ if (incomplete)
+ goto read_again;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ DBGPR("packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+#ifdef XGMAC_ENABLE_RX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, false);
+#endif
+
+ skb_checksum_none_assert(skb);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CSUM_DONE))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+ skb_mark_napi_id(skb, &pdata->napi);
+
+ netdev->last_rx = jiffies;
+ napi_gro_receive(&pdata->napi, skb);
+ }
+
+ if (received) {
+ desc_if->realloc_skb(channel);
+
+ /* Update the Rx Tail Pointer Register with address of
+ * the last cleaned entry */
+ rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+ }
+
+ DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+
+ return received;
+}
+
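+/* NAPI poll routine.  A single NAPI instance services all channels:
+ * every Tx ring is cleaned first, then the Rx budget is shared across
+ * the Rx rings in channel order.
+ */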
+static int xgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
+ napi);
+ struct xgbe_channel *channel;
+ int processed;
+ unsigned int i;
+
+ DBGPR("-->xgbe_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = 0;
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ processed += xgbe_rx_poll(channel, budget - processed);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+
+ DBGPR("<--xgbe_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
+ unsigned int count, unsigned int flag)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ while (count--) {
+ rdata = GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ idx++;
+ }
+}
+
+void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+ unsigned int idx)
+{
+ DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+}
+
+void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_alert(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000000..8909f2b51af1
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,510 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+struct xgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_size;
+ int stat_offset;
+};
+
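+/* Each entry maps an ethtool statistics string to a counter held in
+ * the mmc_stats structure embedded in the private data; the counters
+ * are read back through their recorded size and offset.
+ */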
+#define XGMAC_MMC_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
+ offsetof(struct xgbe_prv_data, mmc_stats._var), \
+ }
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+ XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+ XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+ XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+ XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+ XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+ XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+ XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+ XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+ XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+};
+#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ memcpy(data, xgbe_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+{
+ int ret;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XGBE_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ DBGPR("<--%s\n", __func__);
+
+ return ret;
+}
+
+static void xgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_get_pauseparam\n");
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->tx_pause = pdata->tx_pause;
+ pause->rx_pause = pdata->rx_pause;
+
+ DBGPR("<--xgbe_get_pauseparam\n");
+}
+
+static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_set_pauseparam\n");
+
+ DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
+ pause->autoneg, pause->tx_pause, pause->rx_pause);
+
+ pdata->pause_autoneg = pause->autoneg;
+ if (pause->autoneg) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+
+ } else {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ pdata->tx_pause = pause->tx_pause;
+ pdata->rx_pause = pause->rx_pause;
+ }
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_pauseparam\n");
+
+ return ret;
+}
+
+static int xgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_get_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ spin_lock_irq(&pdata->lock);
+
+ ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->transceiver = XCVR_EXTERNAL;
+
+ spin_unlock_irq(&pdata->lock);
+
+ DBGPR("<--xgbe_get_settings\n");
+
+ return ret;
+}
+
+static int xgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ u32 speed;
+ int ret;
+
+ DBGPR("-->xgbe_set_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ spin_lock_irq(&pdata->lock);
+
+ speed = ethtool_cmd_speed(cmd);
+
+ ret = -EINVAL;
+ if (cmd->phy_address != phydev->addr)
+ goto unlock;
+
+ if ((cmd->autoneg != AUTONEG_ENABLE) &&
+ (cmd->autoneg != AUTONEG_DISABLE))
+ goto unlock;
+
+ if ((cmd->autoneg == AUTONEG_DISABLE) &&
+ (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
+ (cmd->duplex != DUPLEX_FULL)))
+ goto unlock;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ /* Clear settings needed to force speeds */
+ phydev->supported &= ~SUPPORTED_1000baseT_Full;
+ phydev->supported &= ~SUPPORTED_10000baseT_Full;
+ } else {
+ /* Add settings needed to force speed */
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+ phydev->supported |= SUPPORTED_10000baseT_Full;
+ }
+
+ cmd->advertising &= phydev->supported;
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ goto unlock;
+
+ ret = 0;
+ phydev->autoneg = cmd->autoneg;
+ phydev->speed = speed;
+ phydev->duplex = cmd->duplex;
+ phydev->advertising = cmd->advertising;
+
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+unlock:
+ spin_unlock_irq(&pdata->lock);
+
+ DBGPR("<--xgbe_set_settings\n");
+
+ return ret;
+}
+
+static void xgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+ drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int riwt;
+
+ DBGPR("-->xgbe_get_coalesce\n");
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ riwt = pdata->rx_riwt;
+ ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ DBGPR("<--xgbe_get_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames, tx_usecs;
+
+ DBGPR("-->xgbe_set_coalesce\n");
+
+ /* Check for not supported parameters */
+ if ((ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs_irq) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->use_adaptive_rx_coalesce) ||
+ (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) ||
+ (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) ||
+ (ec->tx_coalesce_usecs_low) ||
+ (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) ||
+ (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ /* Can only change rx-frames when interface is down (see
+ * rx_descriptor_init in xgbe-dev.c)
+ */
+ rx_frames = pdata->rx_frames;
+ if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
+ netdev_alert(netdev,
+ "interface must be down to change rx-frames\n");
+ return -EINVAL;
+ }
+
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+
+ /* Use smallest possible value if conversion resulted in zero */
+ if (ec->rx_coalesce_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+ rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
+ netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
+ rx_usecs);
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
+ netdev_alert(netdev, "rx-frames is limited to %d frames\n",
+ pdata->channel->rx_ring->rdesc_count);
+ return -EINVAL;
+ }
+
+ tx_usecs = ec->tx_coalesce_usecs;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+ if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
+ netdev_alert(netdev, "tx-frames is limited to %d frames\n",
+ pdata->channel->tx_ring->rdesc_count);
+ return -EINVAL;
+ }
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+ pdata->tx_usecs = tx_usecs;
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_set_coalesce\n");
+
+ return 0;
+}
+
+static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_settings = xgbe_get_settings,
+ .set_settings = xgbe_set_settings,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = xgbe_get_coalesce,
+ .set_coalesce = xgbe_set_coalesce,
+ .get_pauseparam = xgbe_get_pauseparam,
+ .set_pauseparam = xgbe_set_pauseparam,
+ .get_strings = xgbe_get_strings,
+ .get_ethtool_stats = xgbe_get_ethtool_stats,
+ .get_sset_count = xgbe_get_sset_count,
+};
+
+struct ethtool_ops *xgbe_get_ethtool_ops(void)
+{
+ return (struct ethtool_ops *)&xgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000000..c83584a26713
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,512 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(XGBE_DRV_VERSION);
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
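+/* Allocate one channel per DMA channel, sized to the larger of the Tx
+ * and Rx ring counts, and attach a Tx and/or Rx ring to each channel
+ * for as long as the respective ring counts allow.
+ */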
+static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+
+ DBGPR("-->xgbe_alloc_rings\n");
+
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = devm_kcalloc(pdata->dev, count,
+ sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ return NULL;
+
+ tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
+ sizeof(struct xgbe_ring), GFP_KERNEL);
+ if (!tx_ring)
+ return NULL;
+
+ rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
+ sizeof(struct xgbe_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return NULL;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+			spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_count = count;
+
+ DBGPR("<--xgbe_alloc_rings\n");
+
+ return channel_mem;
+}
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_default_config\n");
+
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_16;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_16;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->power_down = 0;
+ pdata->default_autoneg = AUTONEG_ENABLE;
+ pdata->default_speed = SPEED_10000;
+
+ DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+}
+
+static int xgbe_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct xgbe_hw_if *hw_if;
+ struct xgbe_desc_if *desc_if;
+ struct net_device *netdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const u8 *mac_addr;
+ int ret;
+
+ DBGPR("--> xgbe_probe\n");
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->pdev = pdev;
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, netdev);
+
+ spin_lock_init(&pdata->lock);
+ mutex_init(&pdata->xpcs_mutex);
+
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
+ pdata->tx_desc_count = TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
+ pdata->rx_desc_count = RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Obtain the system clock setting */
+ pdata->sysclock = devm_clk_get(dev, NULL);
+ if (IS_ERR(pdata->sysclock)) {
+ dev_err(dev, "devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->sysclock);
+ goto err_io;
+ }
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Set the DMA mask */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ *(dev->dma_mask) = DMA_BIT_MASK(40);
+ dev->coherent_dma_mask = DMA_BIT_MASK(40);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ goto err_io;
+ }
+ netdev->irq = ret;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+ hw_if = &pdata->hw_if;
+ desc_if = &pdata->desc_if;
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+ /* Retrieve the MAC address */
+ mac_addr = of_get_mac_address(dev->of_node);
+ if (!mac_addr) {
+ dev_err(dev, "invalid mac address for this device\n");
+ ret = -EINVAL;
+ goto err_io;
+ }
+ memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ pdata->phy_mode = of_get_phy_mode(dev->of_node);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+ dev_err(dev, "invalid phy-mode specified for this device\n");
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+ /* Calculate the number of Tx and Rx rings to be created */
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+	if (ret) {
+ dev_err(dev, "error setting real tx queue count\n");
+ goto err_io;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real rx queue count\n");
+ goto err_io;
+ }
+
+ /* Allocate the rings for the DMA channels */
+ pdata->channel = xgbe_alloc_rings(pdata);
+ if (!pdata->channel) {
+ dev_err(dev, "ring allocation failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+	/* Prepare to register with MDIO */
+ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
+ if (!pdata->mii_bus_id) {
+ dev_err(dev, "failed to allocate mii bus id\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+ ret = xgbe_mdio_register(pdata);
+ if (ret)
+ goto err_bus_id;
+
+ /* Set network and ethtool operations */
+ netdev->netdev_ops = xgbe_get_netdev_ops();
+ netdev->ethtool_ops = xgbe_get_ethtool_ops();
+
+ /* Set device features */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ xgbe_init_rx_coalesce(pdata);
+ xgbe_init_tx_coalesce(pdata);
+
+ netif_carrier_off(netdev);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_reg_netdev;
+ }
+
+ xgbe_debugfs_init(pdata);
+
+ netdev_notice(netdev, "net device enabled\n");
+
+ DBGPR("<-- xgbe_probe\n");
+
+ return 0;
+
+err_reg_netdev:
+ xgbe_mdio_unregister(pdata);
+
+err_bus_id:
+ kfree(pdata->mii_bus_id);
+
+err_io:
+ free_netdev(netdev);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_remove\n");
+
+ xgbe_debugfs_exit(pdata);
+
+ unregister_netdev(netdev);
+
+ xgbe_mdio_unregister(pdata);
+
+ kfree(pdata->mii_bus_id);
+
+ free_netdev(netdev);
+
+ DBGPR("<--xgbe_remove\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int xgbe_suspend(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (!netif_running(netdev)) {
+		DBGPR("<--xgbe_suspend\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_resume\n");
+
+ if (!netif_running(netdev)) {
+		DBGPR("<--xgbe_resume\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_of_match);
+
+static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
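+
+/* SIMPLE_DEV_PM_OPS() wires the suspend/resume pair into the system
+ * sleep callbacks only; this driver does not implement runtime PM.
+ */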
+
+static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = "amd-xgbe",
+ .of_match_table = xgbe_of_match,
+ .pm = &xgbe_pm_ops,
+ },
+ .probe = xgbe_probe,
+ .remove = xgbe_remove,
+};
+
+module_platform_driver(xgbe_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000000..ea7a5d6750ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,433 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data;
+
+ DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
+ prtad, mmd_reg);
+
+ mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+
+ DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+
+ return mmd_data;
+}
+
+static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
+ u16 mmd_val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data = mmd_val;
+
+ DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
+ prtad, mmd_reg, mmd_data);
+
+ hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+
+ DBGPR_MDIO("<--xgbe_mdio_write\n");
+
+ return 0;
+}
+
+static void xgbe_adjust_link(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct phy_device *phydev = pdata->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+	if (!phydev)
+ return;
+
+ DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
+ phydev->addr, phydev->link, pdata->phy_link);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (phydev->link) {
+ /* Flow control support */
+ if (pdata->pause_autoneg) {
+ if (phydev->pause || phydev->asym_pause) {
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ } else {
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ }
+ }
+
+ if (pdata->tx_pause != pdata->phy_tx_pause) {
+ hw_if->config_tx_flow_control(pdata);
+ pdata->phy_tx_pause = pdata->tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy_rx_pause) {
+ hw_if->config_rx_flow_control(pdata);
+ pdata->phy_rx_pause = pdata->rx_pause;
+ }
+
+ /* Speed support */
+ if (phydev->speed != pdata->phy_speed) {
+ new_state = 1;
+
+ switch (phydev->speed) {
+ case SPEED_10000:
+ hw_if->set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ hw_if->set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ hw_if->set_gmii_speed(pdata);
+ break;
+ }
+ pdata->phy_speed = phydev->speed;
+ }
+
+ if (phydev->link != pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 1;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR_MDIO("<--xgbe_adjust_link\n");
+}
+
+void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
+ int i;
+
+ dev_alert(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+	dev_alert(dev, "Phy Id (PHYS ID 1 %#04x) = %#04x\n", MDIO_DEVID1,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+	dev_alert(dev, "Phy Id (PHYS ID 2 %#04x) = %#04x\n", MDIO_DEVID2,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+	dev_alert(dev, "Devices in Package (%#04x) = %#04x\n", MDIO_DEVS1,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+	dev_alert(dev, "Devices in Package (%#04x) = %#04x\n", MDIO_DEVS2,
+		  XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_alert(dev, "MMD Device Mask = %#x\n",
+ phydev->c45_ids.devices_in_package);
+ for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
+ dev_alert(dev, " MMD %d: ID = %#08x\n", i,
+ phydev->c45_ids.device_ids[i]);
+
+ dev_alert(dev, "\n*************************************************\n");
+}
+
+int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct device_node *phy_node;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_mdio_register\n");
+
+ /* Retrieve the phy-handle */
+ phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(pdata->dev, "unable to parse phy-handle\n");
+ return -EINVAL;
+ }
+
+ /* Register with the MDIO bus */
+ mii = mdiobus_alloc();
+	if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ ret = -ENOMEM;
+ goto err_node_get;
+ }
+
+ /* Register on the MDIO bus (don't probe any PHYs) */
+ mii->name = XGBE_PHY_NAME;
+ mii->read = xgbe_mdio_read;
+ mii->write = xgbe_mdio_write;
+ snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
+ mii->priv = pdata;
+ mii->phy_mask = ~0;
+ mii->parent = pdata->dev;
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ goto err_mdiobus_alloc;
+ }
+ DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
+
+ /* Probe the PCS using Clause 45 */
+ phydev = get_phy_device(mii, XGBE_PRTAD, true);
+ if (IS_ERR(phydev) || !phydev ||
+ !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
+ dev_err(pdata->dev, "get_phy_device failed\n");
+		ret = IS_ERR(phydev) ? PTR_ERR(phydev) : -ENOLINK;
+ goto err_mdiobus_register;
+ }
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+
+ of_node_get(phy_node);
+ phydev->dev.of_node = phy_node;
+ ret = phy_device_register(phydev);
+ if (ret) {
+ dev_err(pdata->dev, "phy_device_register failed\n");
+ of_node_put(phy_node);
+ goto err_phy_device;
+ }
+
+ /* Add a reference to the PHY driver so it can't be unloaded */
+ pdata->phy_module = phydev->dev.driver ?
+ phydev->dev.driver->owner : NULL;
+ if (!try_module_get(pdata->phy_module)) {
+ dev_err(pdata->dev, "try_module_get failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ pdata->mii = mii;
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_tx_pause = pdata->tx_pause;
+ pdata->phy_rx_pause = pdata->rx_pause;
+
+ ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
+ pdata->phy_mode);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct failed\n");
+ goto err_phy_device;
+ }
+
+ if (!phydev->drv || (phydev->drv->phy_id == 0)) {
+ netdev_err(netdev, "phy_id not valid\n");
+ ret = -ENODEV;
+ goto err_phy_connect;
+ }
+ DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
+ dev_name(&phydev->dev), phydev->link);
+
+ phydev->autoneg = pdata->default_autoneg;
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* Add settings needed to force speed */
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+ phydev->supported |= SUPPORTED_10000baseT_Full;
+
+ phydev->speed = pdata->default_speed;
+ phydev->duplex = DUPLEX_FULL;
+
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+ }
+
+ pdata->phydev = phydev;
+
+ of_node_put(phy_node);
+
+ DBGPHY_REGS(pdata);
+
+ DBGPR("<--xgbe_mdio_register\n");
+
+ return 0;
+
+err_phy_connect:
+ phy_disconnect(phydev);
+
+err_phy_device:
+ phy_device_free(phydev);
+
+err_mdiobus_register:
+ mdiobus_unregister(mii);
+
+err_mdiobus_alloc:
+ mdiobus_free(mii);
+
+err_node_get:
+ of_node_put(phy_node);
+
+ return ret;
+}
+
+void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_mdio_unregister\n");
+
+ phy_disconnect(pdata->phydev);
+ pdata->phydev = NULL;
+
+ module_put(pdata->phy_module);
+ pdata->phy_module = NULL;
+
+ mdiobus_unregister(pdata->mii);
+ pdata->mii->priv = NULL;
+
+ mdiobus_free(pdata->mii);
+ pdata->mii = NULL;
+
+ DBGPR("<--xgbe_mdio_unregister\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000000..ab0627162c01
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,676 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+
+
+#define XGBE_DRV_NAME "amd-xgbe"
+#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define TX_DESC_CNT 512
+#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
+#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
+#define RX_DESC_CNT 512
+
+#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define RX_BUF_ALIGN 64
+
+#define XGBE_MAX_DMA_CHANNELS 16
+#define DMA_ARDOMAIN_SETTING 0x2
+#define DMA_ARCACHE_SETTING 0xb
+#define DMA_AWDOMAIN_SETTING 0x2
+#define DMA_AWCACHE_SETTING 0x7
+#define DMA_INTERRUPT_MASK 0x31c7
+
+#define XGMAC_MIN_PACKET 60
+#define XGMAC_STD_PACKET_MTU 1500
+#define XGMAC_MAX_STD_PACKET 1518
+#define XGMAC_JUMBO_PACKET_MTU 9000
+#define XGMAC_MAX_JUMBO_PACKET 9018
+
+#define MAX_MULTICAST_LIST 14
+#define TX_FLAGS_IP_PKT 0x00000001
+#define TX_FLAGS_TCP_PKT 0x00000002
+
+/* MDIO bus phy name */
+#define XGBE_PHY_NAME "amd_xgbe_phy"
+#define XGBE_PRTAD 0
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT 1
+#define XGMAC_IOCTL_CONTEXT 2
+
+#define FIFO_SIZE_B(x) (x)
+#define FIFO_SIZE_KB(x)	((x) * 1024)
+
+#define XGBE_TC_CNT 2
+
+/* Helper macro for descriptor handling
+ * Always use GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be ANDed
+ * with (descriptor count - 1) of the ring to index to
+ * the proper descriptor data.
+ */
+#define GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
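+
+/* Illustrative example (assumes rdesc_count is a power of two, as the
+ * 512-entry defaults above are): a free-running index of 515 on a
+ * 512-entry ring maps to entry 515 & 511 == 3, so the wrap costs a
+ * single AND instead of a modulo.
+ */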
+
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS 100
+#define XGMAC_INIT_DMA_TX_FRAMES 16
+
+#define XGMAC_MAX_DMA_RIWT 0xff
+#define XGMAC_INIT_DMA_RX_USECS 100
+#define XGMAC_INIT_DMA_RX_FRAMES 16
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ unsigned int rdesc_count;
+ unsigned int length;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+ unsigned int desc0;
+ unsigned int desc1;
+ unsigned int desc2;
+ unsigned int desc3;
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+ struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
+ dma_addr_t rdesc_dma; /* DMA address of descriptor */
+
+ struct sk_buff *skb; /* Virtual address of SKB */
+ dma_addr_t skb_dma; /* DMA address of SKB data */
+ unsigned int skb_dma_len; /* Length of SKB DMA area */
+ unsigned int tso_header; /* TSO header indicator */
+
+ unsigned short len; /* Length of received Rx packet */
+
+ unsigned int interrupt; /* Interrupt indicator */
+
+ unsigned int mapped_as_page;
+};
+
+struct xgbe_ring {
+ /* Ring lock - used just for TX rings at the moment */
+ spinlock_t lock;
+
+ /* Per packet related information */
+ struct xgbe_packet_data packet_data;
+
+ /* Virtual/DMA addresses and count of allocated descriptor memory */
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int rdesc_count;
+
+ /* Array of descriptor data corresponding the descriptor memory
+ * (always use the GET_DESC_DATA macro to access this data)
+ */
+ struct xgbe_ring_data *rdata;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: count of descriptors in which a packet has been received
+ * (used with skb_realloc_index to refresh the ring)
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int queue_stopped;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+
+ struct {
+ unsigned int realloc_index;
+ unsigned int realloc_threshold;
+ } rx;
+ };
+} ____cacheline_aligned;
+
+/* Structure used to describe the descriptor rings associated with
+ * a DMA channel.
+ */
+struct xgbe_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xgbe_prv_data *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct hrtimer tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+} ____cacheline_aligned;
+
+enum xgbe_int {
+ XGMAC_INT_DMA_ISR_DC0IS,
+ XGMAC_INT_DMA_CH_SR_TI,
+ XGMAC_INT_DMA_CH_SR_TPS,
+ XGMAC_INT_DMA_CH_SR_TBU,
+ XGMAC_INT_DMA_CH_SR_RI,
+ XGMAC_INT_DMA_CH_SR_RBU,
+ XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_FBE,
+ XGMAC_INT_DMA_ALL,
+};
+
+enum xgbe_int_state {
+ XGMAC_INT_STATE_SAVE,
+ XGMAC_INT_STATE_RESTORE,
+};
+
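+/* Encoded as (FIFO size / 256) - 1, e.g. 2KB -> 0x07, 64KB -> 0xff */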
+enum xgbe_mtl_fifo_size {
+ XGMAC_MTL_FIFO_SIZE_256 = 0x00,
+ XGMAC_MTL_FIFO_SIZE_512 = 0x01,
+ XGMAC_MTL_FIFO_SIZE_1K = 0x03,
+ XGMAC_MTL_FIFO_SIZE_2K = 0x07,
+ XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
+ XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
+ XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
+ XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
+ XGMAC_MTL_FIFO_SIZE_64K = 0xff,
+ XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
+ XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
+};
+
+struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* Rx Stats */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+};
+
+struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+ int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+
+ int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+ int (*set_gmii_speed)(struct xgbe_prv_data *);
+ int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
+ int (*set_xgmii_speed)(struct xgbe_prv_data *);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+ void (*enable_rx)(struct xgbe_prv_data *);
+ void (*disable_rx)(struct xgbe_prv_data *);
+
+ void (*powerup_tx)(struct xgbe_prv_data *);
+ void (*powerdown_tx)(struct xgbe_prv_data *);
+ void (*powerup_rx)(struct xgbe_prv_data *);
+ void (*powerdown_rx)(struct xgbe_prv_data *);
+
+ int (*init)(struct xgbe_prv_data *);
+ int (*exit)(struct xgbe_prv_data *);
+
+ int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
+ int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
+ void (*pre_xmit)(struct xgbe_channel *);
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_reset)(struct xgbe_ring_data *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct xgbe_prv_data *);
+ int (*config_rx_flow_control)(struct xgbe_prv_data *);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xgbe_prv_data *);
+ int (*config_tx_coalesce)(struct xgbe_prv_data *);
+ unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
+ unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_pblx8)(struct xgbe_prv_data *);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+ void (*read_mmc_stats)(struct xgbe_prv_data *);
+};
+
+struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ void (*realloc_skb)(struct xgbe_channel *);
+ void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xgbe_hw_features {
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+	unsigned int ts;		/* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+	unsigned int adv_ts_hi;		/* Advanced Timestamping High Word */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ struct device *dev;
+
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing mutex */
+ struct mutex xpcs_mutex;
+
+ int irq_number;
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_desc_if desc_if;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_frames;
+
+ /* Current MTU */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* MDIO settings */
+ struct module *phy_module;
+ char *mii_bus_id;
+ struct mii_bus *mii;
+ int mdio_mmd;
+ struct phy_device *phydev;
+ int default_autoneg;
+ int default_speed;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+ unsigned int phy_tx_pause;
+ unsigned int phy_rx_pause;
+
+ /* Netdev related settings */
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+
+ /* System clock value used for Rx watchdog */
+ struct clk *sysclock;
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+ /* Device restart work structure */
+ struct work_struct restart_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
+#endif
+};
+
+/* Function prototypes */
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
+struct net_device_ops *xgbe_get_netdev_ops(void);
+struct ethtool_ops *xgbe_get_ethtool_ops(void);
+
+int xgbe_mdio_register(struct xgbe_prv_data *);
+void xgbe_mdio_unregister(struct xgbe_prv_data *);
+void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
+ unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+ unsigned int);
+void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
+void xgbe_get_all_hw_features(struct xgbe_prv_data *);
+int xgbe_powerup(struct net_device *, unsigned int);
+int xgbe_powerdown(struct net_device *, unsigned int);
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+
+#ifdef CONFIG_DEBUG_FS
+void xgbe_debugfs_init(struct xgbe_prv_data *);
+void xgbe_debugfs_exit(struct xgbe_prv_data *);
+#else
+static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_DESC_DUMP
+#define XGMAC_ENABLE_RX_DESC_DUMP
+#endif
+
+/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_PKT_DUMP
+#define XGMAC_ENABLE_RX_PKT_DUMP
+#endif
+
+/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+#if 0
+#define YDEBUG
+#define YDEBUG_MDIO
+#endif
+
+/* For debug prints */
+#ifdef YDEBUG
+#define DBGPR(x...) pr_alert(x)
+#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
+#else
+#define DBGPR(x...) do { } while (0)
+#define DBGPHY_REGS(x...) do { } while (0)
+#endif
+
+#ifdef YDEBUG_MDIO
+#define DBGPR_MDIO(x...) pr_alert(x)
+#else
+#define DBGPR_MDIO(x...) do { } while (0)
+#endif
+
+#endif /* __XGBE_H__ */
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d647a7d115ac..18e2faccebb0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -13,6 +13,7 @@
* Vineet Gupta
*/
+#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void arc_emac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ arc_emac_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* arc_emac_open - Open the network device.
* @ndev: Pointer to the network device.
@@ -451,6 +461,41 @@ static int arc_emac_open(struct net_device *ndev)
}
/**
+ * arc_emac_set_rx_mode - Change the receive filtering mode.
+ * @ndev: Pointer to the network device.
+ *
+ * This function enables/disables promiscuous or all-multicast mode
+ * and updates the multicast filtering list of the network device.
+ */
+static void arc_emac_set_rx_mode(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+
+ if (ndev->flags & IFF_PROMISC) {
+ arc_reg_or(priv, R_CTRL, PROM_MASK);
+ } else {
+ arc_reg_clr(priv, R_CTRL, PROM_MASK);
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ arc_reg_set(priv, R_LAFL, ~0);
+ arc_reg_set(priv, R_LAFH, ~0);
+ } else {
+ struct netdev_hw_addr *ha;
+ unsigned int filter[2] = { 0, 0 };
+ int bit;
+
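+			/* Hash: the top six bits of the little-endian
+			 * CRC-32 of the address pick one of 64 filter
+			 * bits; bit >> 5 selects LAFL (0) or LAFH (1)
+			 * and bit & 31 is the position within it.
+			 */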
+ netdev_for_each_mc_addr(ha, ndev) {
+ bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+ filter[bit >> 5] |= 1 << (bit & 31);
+ }
+
+ arc_reg_set(priv, R_LAFL, filter[0]);
+ arc_reg_set(priv, R_LAFH, filter[1]);
+ }
+ }
+}
+
+/**
* arc_emac_stop - Close the network device.
* @ndev: Pointer to the network device.
*
@@ -620,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_start_xmit = arc_emac_tx,
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
+ .ndo_set_rx_mode = arc_emac_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = arc_emac_poll_controller,
+#endif
};
static int arc_emac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 17bb9ce96260..49faa97a30c3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->netdev_ops = &alx_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+ netdev->ethtool_ops = &alx_ethtool_ops;
netdev->irq = pdev->irq;
netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 859ea844ba0f..48694c239d5c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -56,8 +56,8 @@ static int atl1c_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
void atl1c_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+ netdev->ethtool_ops = &atl1c_ethtool_ops;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 82b23861bf55..1be072f4afc2 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -57,8 +57,8 @@ static int atl1e_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
void atl1e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+ netdev->ethtool_ops = &atl1e_ethtool_ops;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index dfd0e91fa726..b460db7919a2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3258,8 +3258,8 @@ static int atl1_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 78befb522a52..6746bd717146 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+ netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -1769,8 +1769,8 @@ static int atl2_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd03722..3e488094b073 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
In case of using this driver on BCM4706 it is also required to enable
BCMA_DRIVER_GMAC_CMN to make it work.
+config SYSTEMPORT
+ tristate "Broadcom SYSTEMPORT internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if SYSTEMPORT=y
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset using an internal
+ Ethernet switch.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0d4c7d..e2a958a657e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 05ba62589017..ca5a20a48b14 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
netif_napi_add(dev, &bp->napi, b44_poll, 64);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->irq = sdev->irq;
- SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+ dev->ethtool_ops = &b44_ethtool_ops;
err = ssb_bus_powerup(sdev->bus, 0);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5565d6..3e8d1a88ed3d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
};
-#define BCM_ENET_STATS_LEN \
- (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
static const u32 unused_mib_regs[] = {
ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
dev->netdev_ops = &bcm_enet_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
- SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+ dev->ethtool_ops = &bcm_enet_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
- SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+ dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 000000000000..141160ef249a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1654 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = __raw_readl(priv->base + offset + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
+/* L2-interrupt masking/unmasking helpers; they automatically save the
+ * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
+ * hot paths.
+ */
+#define BCM_SYSPORT_INTR_L2(which) \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
+} \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
+ priv->irq##which##_mask |= (mask); \
+} \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
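+
+/* The macro expands to intrl2_0/1_mask_set() and intrl2_0/1_mask_clear();
+ * a caller would then do, e.g. (sketch only, the bit name below is
+ * illustrative rather than taken from this hunk):
+ *
+ *	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+ */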
+
+/* Register accesses to GISB/RBUS registers are expensive (a few hundred
+ * nanoseconds), so keep the check for 64-bit explicit here to save
+ * one register write per packet on 32-bit platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+ d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+ __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+ struct dma_desc *desc,
+ unsigned int port)
+{
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+ tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
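+
+/* Both wrappers delegate to phylib; the netif_running() checks above
+ * are what guarantee priv->phydev is valid, since the PHY is attached
+ * in the driver's open path, as is typical for phylib users.
+ */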
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ if (priv->rx_csum_en)
+ reg |= RXCHK_EN;
+ else
+ reg &= ~RXCHK_EN;
+
+	/* If UniMAC forwards the CRC, we need to skip over it for a
+	 * valid CHK bit to be set in the per-packet status word
+ */
+ if (priv->rx_csum_en && priv->crc_fwd)
+ reg |= RXCHK_SKIP_FCS;
+ else
+ reg &= ~RXCHK_SKIP_FCS;
+
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+	/* Hardware transmit checksum requires us to enable the Transmit
+	 * status block, which is prepended to the packet contents
+ */
+ priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (priv->tsb_en)
+ reg |= TSB_EN;
+ else
+ reg &= ~TSB_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & NETIF_F_RXCSUM)
+ ret = bcm_sysport_set_rx_csum(dev, wanted);
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+ return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_MIB_RX("rx_control", mib.rx.cf),
+ STAT_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_MIB_RX("rx_align", mib.rx.aln),
+ STAT_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_MIB_RX("rx_code", mib.rx.cde),
+ STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_MIB_TX("tx_control", mib.tx.cf),
+ STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* RXCHK misc statistics */
+ STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+ STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+ RXCHK_OTHER_DISC_CNTR),
+ /* RBUF misc statistics */
+ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+ STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, "0.1", sizeof(info->version));
+ strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_SYSPORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_sysport_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ switch (s->type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ continue;
+ case BCM_SYSPORT_STAT_MIB_RX:
+ case BCM_SYSPORT_STAT_MIB_TX:
+ case BCM_SYSPORT_STAT_RUNT:
+ /* TX stats sit one 0xc gap after the RX stats, and the
+ * RUNT stats one further gap after the TX stats (see
+ * UMAC_MIB_STAT_OFFSET)
+ */
+ if (s->type == BCM_SYSPORT_STAT_MIB_TX)
+ offset = UMAC_MIB_STAT_OFFSET;
+ else if (s->type == BCM_SYSPORT_STAT_RUNT)
+ offset = 2 * UMAC_MIB_STAT_OFFSET;
+ val = umac_readl(priv, UMAC_MIB_START + j + offset);
+ break;
+ case BCM_SYSPORT_STAT_RXCHK:
+ val = rxchk_readl(priv, s->reg_offset);
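+ /* RXCHK counters saturate at the maximum value; clear a
+ * saturated counter so that counting can resume
+ */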
+ if (val == ~0)
+ rxchk_writel(priv, 0, s->reg_offset);
+ break;
+ case BCM_SYSPORT_STAT_RBUF:
+ val = rbuf_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rbuf_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+
+ netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcm_sysport_update_mib_counters(priv);
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ if (s->type == BCM_SYSPORT_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ dma_addr_t mapping;
+ int ret;
+
+ cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!cb->skb) {
+ netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(kdev, cb->skb->data,
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcm_sysport_free_cb(cb);
+ netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
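+ /* Advance the assignment index, wrapping with a mask since the
+ * number of RX buffers is a power of 2
+ */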
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DESC_SIZE);
+
+ netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+ return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+ unsigned int budget)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ unsigned int processed = 0, to_process;
+ struct bcm_sysport_cb *cb;
+ struct sk_buff *skb;
+ unsigned int p_index;
+ u16 len, status;
+ struct bcm_rsb *rsb;
+
+ /* Determine how much we should process since last call */
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ p_index &= RDMA_PROD_INDEX_MASK;
+
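+ /* The hardware producer index is a free-running 16-bit counter;
+ * account for it wrapping around since our last consumer snapshot
+ */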
+ if (p_index < priv->rx_c_index)
+ to_process = (RDMA_CONS_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ to_process = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p_index=%d rx_c_index=%d to_process=%d\n",
+ p_index, priv->rx_c_index, to_process);
+
+ while ((processed < to_process) &&
+ (processed < budget)) {
+
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+ /* Extract the Receive Status Block prepended */
+ rsb = (struct bcm_rsb *)skb->data;
+ len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+ status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+ DESC_STATUS_MASK;
+
+ processed++;
+ priv->rx_read_ptr++;
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+ p_index, priv->rx_c_index, priv->rx_read_ptr,
+ len, status);
+
+ if (unlikely(!skb)) {
+ netif_err(priv, rx_err, ndev, "out of memory!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+ netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+ netif_err(priv, rx_err, ndev, "error packet\n");
+ if (status & RX_STATUS_OVFLOW)
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ skb_put(skb, len);
+
+ /* Hardware validated our checksum */
+ if (likely(status & DESC_L4_CSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Hardware prepends 2 bytes before the Ethernet header, and we
+ * also have the Receive Status Block; strip all of this off the
+ * SKB
+ */
+ skb_pull(skb, sizeof(*rsb) + 2);
+ len -= (sizeof(*rsb) + 2);
+
+ /* UniMAC may forward CRC */
+ if (priv->crc_fwd) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ napi_gro_receive(&priv->napi, skb);
+refill:
+ bcm_sysport_rx_refill(priv, cb);
+ }
+
+ return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb,
+ unsigned int *bytes_compl,
+ unsigned int *pkts_compl)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+
+ if (cb->skb) {
+ ndev->stats.tx_bytes += cb->skb->len;
+ *bytes_compl += cb->skb->len;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ ndev->stats.tx_packets++;
+ (*pkts_compl)++;
+ bcm_sysport_free_cb(cb);
+ /* SKB fragment */
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ struct net_device *ndev = priv->netdev;
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ u32 hw_ind;
+
+ txq = netdev_get_tx_queue(ndev, ring->index);
+
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+ ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+ last_c_index = ring->c_index;
+ num_tx_cbs = ring->size;
+
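+ /* Fold the HW consumer index into the ring's power-of-2 index
+ * space before computing how many descriptors completed
+ */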
+ c_index &= (num_tx_cbs - 1);
+
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+ ring->index, c_index, last_tx_cn, last_c_index);
+
+ while (last_tx_cn-- > 0) {
+ cb = ring->cbs + last_c_index;
+ bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+ last_c_index++;
+ last_c_index &= (num_tx_cbs - 1);
+ }
+
+ ring->c_index = c_index;
+
+ if (netif_tx_queue_stopped(txq) && pkts_compl)
+ netif_tx_wake_queue(txq);
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+ return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned int released;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_tx_ring *ring =
+ container_of(napi, struct bcm_sysport_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable TX interrupt */
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ }
+
+ return work_done;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++)
+ bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_priv *priv =
+ container_of(napi, struct bcm_sysport_priv, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_desc_rx(priv, budget);
+
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable RX interrupts */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+ }
+
+ return work_done;
+}
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq0_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious RX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ /* disable RX interrupts */
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* A TX ring is full; perform a full reclaim since we do not know
+ * which ring triggered this interrupt
+ */
+ if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+ bcm_sysport_tx_reclaim_all(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring;
+
+ priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq1_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious TX interrupt\n");
+ return IRQ_NONE;
+ }
+
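+ /* Each TX queue has its own bit in the Level-2 interrupt
+ * controller; schedule per-ring NAPI for every ring that
+ * signalled completion
+ */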
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ if (!(priv->irq1_stat & BIT(ring)))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_1_mask_set(priv, BIT(ring));
+ __napi_schedule(&txr->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sk_buff *nskb;
+ struct bcm_tsb *tsb;
+ u32 csum_info;
+ u8 ip_proto;
+ u16 csum_start;
+ u16 ip_ver;
+
+ /* Re-allocate SKB if needed */
+ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+ nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+ dev_kfree_skb(skb);
+ if (!nskb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return NULL;
+ }
+ skb = nskb;
+ }
+
+ tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
+ /* Zero-out TSB by default */
+ memset(tsb, 0, sizeof(*tsb));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = ntohs(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return skb;
+ }
+
+ /* Get the checksum offset and the L4 (transport) offset */
+ csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+ csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+ csum_info |= (csum_start << L4_PTR_SHIFT);
+
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ csum_info |= L4_LENGTH_VALID;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ csum_info |= L4_UDP;
+ } else {
+ csum_info = 0;
+ }
+
+ tsb->l4_ptr_dest_map = csum_info;
+ }
+
+ return skb;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ struct dma_desc *desc;
+ unsigned int skb_len;
+ unsigned long flags;
+ dma_addr_t mapping;
+ u32 len_status;
+ u16 queue;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue);
+ ring = &priv->tx_rings[queue];
+
+ /* lock against tx reclaim in BH context and TX ring full interrupt */
+ spin_lock_irqsave(&ring->lock, flags);
+ if (unlikely(ring->desc_count == 0)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "queue %d awake and ring full!\n", queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Insert TSB and checksum info; this may reallocate the SKB, so
+ * the returned pointer (NULL on failure) must be used from now on
+ */
+ if (priv->tsb_en) {
+ skb = bcm_sysport_insert_tsb(skb, dev);
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 68 bytes
+ * (including FCS and tag) because the length verification is done after
+ * the Broadcom tag is stripped off the ingress packet.
+ */
+ if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
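+ /* skb_padto() zeroes the padding but does not update skb->len, so
+ * account for the padded length explicitly
+ */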
+ skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+ ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb_len);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Remember the SKB for future freeing */
+ cb = &ring->cbs[ring->curr_desc];
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, skb_len);
+
+ /* Fetch a descriptor entry from our pool */
+ desc = ring->desc_cpu;
+
+ desc->addr_lo = lower_32_bits(mapping);
+ len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+ len_status |= (skb_len << DESC_LEN_SHIFT);
+ len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+ DESC_STATUS_SHIFT;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+ ring->curr_desc++;
+ if (ring->curr_desc == ring->size)
+ ring->curr_desc = 0;
+ ring->desc_count--;
+
+ /* Ensure write completion of the descriptor status/length
+ * in DRAM before the System Port WRITE_PORT register latches
+ * the value
+ */
+ wmb();
+ desc->addr_status_len = len_status;
+ wmb();
+
+ /* Write this descriptor address to the RING write port */
+ tdma_port_write_desc_addr(priv, desc, ring->index);
+
+ /* Check ring space and update SW control flow */
+ if (ring->desc_count == 0)
+ netif_tx_stop_queue(txq);
+
+ netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+ ring->index, ring->desc_count, ring->curr_desc);
+
+ ret = NETDEV_TX_OK;
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+ netdev_warn(dev, "transmit timeout!\n");
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned int changed = 0;
+ u32 cmd_bits = 0, reg;
+
+ if (priv->old_link != phydev->link) {
+ changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ if (changed) {
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+ CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ phy_print_status(priv->phydev);
+ }
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ size_t size;
+ void *p;
+ u32 reg;
+
+ /* Simple descriptors partitioning for now */
+ size = 256;
+
+ /* We just need one DMA descriptor which is DMA-able, since writing to
+ * the port will allocate a new descriptor in its internal linked-list
+ */
+ p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc),
+ &ring->desc_dma, GFP_KERNEL);
+ if (!p) {
+ netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!ring->cbs) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc), p,
+ ring->desc_dma);
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize SW view of the ring */
+ spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+ ring->curr_desc = 0;
+
+ /* Initialize HW ring */
+ tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+ tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+ tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+ /* Program the number of descriptors as MAX_THRESHOLD and a
+ * hysteresis trigger threshold of one descriptor
+ */
+ tdma_writel(priv, ring->size |
+ 1 << RING_HYST_THRESH_SHIFT,
+ TDMA_DESC_RING_MAX_HYST(index));
+
+ /* Enable the ring queue in the arbiter */
+ reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+ reg |= (1 << index);
+ tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+ napi_enable(&ring->napi);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "TDMA cfg, size=%d, desc_cpu=%p\n",
+ ring->size, ring->desc_cpu);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ u32 reg;
+
+ /* Caller should stop the TDMA engine */
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!(reg & TDMA_DISABLED))
+ netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ bcm_sysport_tx_reclaim(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+
+ if (ring->desc_dma) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc), ring->desc_cpu,
+ ring->desc_dma);
+ ring->desc_dma = 0;
+ }
+ ring->size = 0;
+ ring->alloc_size = 0;
+
+ netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = rdma_readl(priv, RDMA_CONTROL);
+ if (enable)
+ reg |= RDMA_EN;
+ else
+ reg &= ~RDMA_EN;
+ rdma_writel(priv, reg, RDMA_CONTROL);
+
+ /* Poll for RDMA enabling/disabling completion */
+ do {
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!!(reg & RDMA_DISABLED) == !enable)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (enable)
+ reg |= TDMA_EN;
+ else
+ reg &= ~TDMA_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ /* Poll for TDMA enabling/disabling completion */
+ do {
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!!(reg & TDMA_DISABLED) == !enable)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+ u32 reg;
+ int ret;
+
+ /* Initialize SW view of the RX ring */
+ priv->num_rx_bds = NUM_RX_DESC;
+ priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kcalloc(priv->num_rx_bds,
+ sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!priv->rx_cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = bcm_sysport_alloc_rx_bufs(priv);
+ if (ret) {
+ netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+ return ret;
+ }
+
+ /* Initialize HW, ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ rdma_enable_set(priv, 0);
+
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+ rdma_writel(priv, 0, RDMA_PROD_INDEX);
+ rdma_writel(priv, 0, RDMA_CONS_INDEX);
+ rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+ RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+ /* Operate the queue in ring mode */
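+ /* Addresses are expressed in 32-bit descriptor words (two per
+ * descriptor), hence the NUM_HW_RX_DESC_WORDS - 1 end address
+ */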
+ rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+ rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+ rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+ rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+ rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+ priv->num_rx_bds, priv->rx_bds);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ unsigned int i;
+ u32 reg;
+
+ /* Caller should ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+ if (dma_unmap_addr(cb, dma_addr))
+ dma_unmap_single(&priv->pdev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ bcm_sysport_free_cb(cb);
+ }
+
+ kfree(priv->rx_cbs);
+ priv->rx_cbs = NULL;
+
+ netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC)
+ reg |= CMD_PROMISC;
+ else
+ reg &= ~CMD_PROMISC;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* No support for ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= CMD_RX_EN | CMD_TX_EN;
+ else
+ reg &= ~(CMD_RX_EN | CMD_TX_EN);
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* UniMAC stops on a packet boundary; wait for a full-sized packet
+ * to be processed (1 msec).
+ */
+ if (enable == 0)
+ usleep_range(1000, 2000);
+}
+
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+ unsigned int timeout = 0;
+ u32 reg;
+ int ret = 0;
+
+ umac_writel(priv, 0, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == 1000) {
+ dev_err(&priv->pdev->dev,
+ "timeout waiting for MAC to come out of reset\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+ unsigned char *addr)
+{
+ umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
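+ /* Assert the RX and TX flush controls, give the hardware 1ms to
+ * drain in-flight data, then de-assert them
+ */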
+ topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+ topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+ mdelay(1);
+ topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+ topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* Reset UniMAC */
+ ret = umac_reset(priv);
+ if (ret) {
+ netdev_err(dev, "UniMAC reset failed\n");
+ return ret;
+ }
+
+ /* Flush TX and RX FIFOs at TOPCTRL level */
+ topctrl_flush(priv);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, 0);
+
+ /* Enable RBUF 2-byte alignment and the Receive Status Block */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Set maximum frame length */
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* Set MAC address */
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ /* Read CRC forward */
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+ priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+ if (!priv->phydev) {
+ netdev_err(dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Reset housekeeping link status */
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+ priv->old_pause = -1;
+
+ /* mask all interrupts and request them */
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request RX interrupt\n");
+ goto out_phy_disconnect;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
+
+ /* Initialize both hardware and software ring */
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ret = bcm_sysport_init_tx_ring(priv, i);
+ if (ret) {
+ netdev_err(dev, "failed to initialize TX ring %d\n",
+ i);
+ goto out_free_tx_ring;
+ }
+ }
+
+ /* Initialize linked-list */
+ tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+ /* Initialize RX ring */
+ ret = bcm_sysport_init_rx_ring(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize RX ring\n");
+ goto out_free_rx_ring;
+ }
+
+ /* Turn on RDMA */
+ ret = rdma_enable_set(priv, 1);
+ if (ret)
+ goto out_free_rx_ring;
+
+ /* Enable RX interrupt and TX ring full interrupt */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+ /* Turn on TDMA */
+ ret = tdma_enable_set(priv, 1);
+ if (ret)
+ goto out_clear_rx_int;
+
+ /* Enable NAPI */
+ napi_enable(&priv->napi);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(priv, 1);
+
+ phy_start(priv->phydev);
+
+ /* Enable TX interrupts for the 32 TXQs */
+ intrl2_1_mask_clear(priv, 0xffffffff);
+
+ /* Last call before we start the real business */
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+out_clear_rx_int:
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+ bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+ /* Only tear down the rings that were actually initialized */
+ while (i-- > 0)
+ bcm_sysport_fini_tx_ring(priv, i);
+ free_irq(priv->irq1, dev);
+out_free_irq0:
+ free_irq(priv->irq0, dev);
+out_phy_disconnect:
+ phy_disconnect(priv->phydev);
+ return ret;
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* stop all software from updating hardware */
+ netif_tx_stop_all_queues(dev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ /* mask all interrupts */
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ /* Disable UniMAC RX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ ret = tdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling RDMA\n");
+ return ret;
+ }
+
+ /* Wait for a maximum packet size to be drained */
+ usleep_range(2000, 3000);
+
+ ret = rdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling TDMA\n");
+ return ret;
+ }
+
+ /* Disable UniMAC TX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* Free RX/TX rings SW structures */
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ bcm_sysport_fini_rx_ring(priv);
+
+ free_irq(priv->irq0, dev);
+ free_irq(priv->irq1, dev);
+
+ /* Disconnect from PHY */
+ phy_disconnect(priv->phydev);
+
+ return 0;
+}
+
+static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .get_settings = bcm_sysport_get_settings,
+ .set_settings = bcm_sysport_set_settings,
+ .get_drvinfo = bcm_sysport_get_drvinfo,
+ .get_msglevel = bcm_sysport_get_msglvl,
+ .set_msglevel = bcm_sysport_set_msglvl,
+ .get_link = ethtool_op_get_link,
+ .get_strings = bcm_sysport_get_strings,
+ .get_ethtool_stats = bcm_sysport_get_stats,
+ .get_sset_count = bcm_sysport_get_sset_count,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+ .ndo_start_xmit = bcm_sysport_xmit,
+ .ndo_tx_timeout = bcm_sysport_tx_timeout,
+ .ndo_open = bcm_sysport_open,
+ .ndo_stop = bcm_sysport_stop,
+ .ndo_set_features = bcm_sysport_set_features,
+ .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT "v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+ struct bcm_sysport_priv *priv;
+ struct device_node *dn;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ u32 txq, rxq;
+ int ret;
+
+ dn = pdev->dev.of_node;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Read the Transmit/Receive Queue properties */
+ if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+ txq = TDMA_NUM_RINGS;
+ if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+ rxq = 1;
+
+ dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+ if (!dev)
+ return -ENOMEM;
+
+ /* Initialize private members */
+ priv = netdev_priv(dev);
+
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ dev_err(&pdev->dev, "invalid interrupts\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto err;
+ }
+
+ priv->netdev = dev;
+ priv->pdev = pdev;
+
+ priv->phy_interface = of_get_phy_mode(dn);
+ /* Default to GMII interface mode; of_get_phy_mode() returns a
+ * negative errno, so compare as a signed value since
+ * phy_interface_t may be unsigned
+ */
+ if ((int)priv->phy_interface < 0)
+ priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register fixed PHY\n");
+ goto err;
+ }
+
+ priv->phy_dn = dn;
+ }
+
+ /* Initialize netdevice members */
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ random_ether_addr(dev->dev_addr);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ dev->ethtool_ops = &bcm_sysport_ethtool_ops;
+ dev->netdev_ops = &bcm_sysport_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+ /* HW supported features, none enabled by default */
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
+ dev->needed_headroom += sizeof(struct bcm_tsb);
+
+ /* We are interfaced to a switch which handles the multicast
+ * filtering for us, so we do not support programming any
+ * multicast hash table in this Ethernet MAC.
+ */
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* libphy will adjust the link state accordingly */
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register net_device\n");
+ goto err;
+ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+ "Broadcom SYSTEMPORT" REV_FMT
+ " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+ priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+ return 0;
+err:
+ free_netdev(dev);
+ return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+ /* Not much to do; ndo_close has been called
+ * and we use managed allocations
+ */
+ unregister_netdev(dev);
+ free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemport-v1.00" },
+ { .compatible = "brcm,systemport" },
+ { /* sentinel */ }
+};
+
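+/* For illustration only: a hypothetical device tree node matching this
+ * driver; the unit address, register size and interrupt specifiers below
+ * are made up, and "systemport,num-txq"/"systemport,num-rxq" are optional
+ * (defaulting to TDMA_NUM_RINGS and 1 respectively):
+ *
+ * ethernet@f04a0000 {
+ * compatible = "brcm,systemport-v1.00";
+ * reg = <0xf04a0000 0x4650>;
+ * interrupts = <0 22 0>, <0 23 0>;
+ * systemport,num-txq = <16>;
+ * };
+ */
+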
+static struct platform_driver bcm_sysport_driver = {
+ .probe = bcm_sysport_probe,
+ .remove = bcm_sysport_remove,
+ .driver = {
+ .name = "brcm-systemport",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_sysport_of_match,
+ },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 000000000000..281c08246037
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,678 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN 0x00
+#define DESC_ADDR_HI_SHIFT 0
+#define DESC_ADDR_HI_MASK 0xff
+#define DESC_STATUS_SHIFT 8
+#define DESC_STATUS_MASK 0x3ff
+#define DESC_LEN_SHIFT 18
+#define DESC_LEN_MASK 0x7fff
+#define DESC_ADDR_LO 0x04
+
+/* HW supports 40-bit addressing, hence the address is split between the
+ * two 32-bit words of each descriptor
+ */
+#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH 2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
+ * With ENET_PAD(10) this rounds up to 1536, a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN 4
+#define ENET_PAD 10
+#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct bcm_tsb {
+ u32 pcp_dei_vid;
+#define PCP_DEI_MASK 0xf
+#define VID_SHIFT 4
+#define VID_MASK 0xfff
+ u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK 0x1ff
+#define L4_PTR_SHIFT 9
+#define L4_PTR_MASK 0x1ff
+#define L4_UDP (1 << 18)
+#define L4_LENGTH_VALID (1 << 19)
+#define DEST_MAP_SHIFT 20
+#define DEST_MAP_MASK 0x1ff
+};
+
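+/* For illustration (a sketch, not taken from hardware documentation): for
+ * a TCP packet whose checksum field sits 16 bytes into the transport
+ * header, bcm_sysport_insert_tsb() places (l4_start + 16) in bits 0..8 of
+ * l4_ptr_dest_map, l4_start (the transport header offset from the
+ * Ethernet header) in bits 9..17, and sets L4_LENGTH_VALID.
+ */
+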
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct bcm_rsb {
+ u32 rx_status_len;
+ u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM (1 << 7)
+#define DESC_SOP (1 << 8)
+#define DESC_EOP (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST 0
+#define RX_STATUS_BCAST 0x04
+#define RX_STATUS_MCAST 0x08
+#define RX_STATUS_L2_MCAST 0x0c
+#define RX_STATUS_ERR (1 << 4)
+#define RX_STATUS_OVFLOW (1 << 5)
+#define RX_STATUS_PARSE_FAIL (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT 0x00
+#define TX_STATUS_VLAN_PCP_TSB 0x01
+#define TX_STATUS_VLAN_QUEUE 0x02
+#define TX_STATUS_VLAN_VID_TSB 0x03
+#define TX_STATUS_OWR_CRC (1 << 2)
+#define TX_STATUS_APP_CRC (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT 0
+#define TX_STATUS_BRCM_TAG_ZERO 0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
+#define TX_STATUS_SKIP_BYTES (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET 0
+#define REV_CNTL 0x00
+#define REV_MASK 0xffff
+
+#define RX_FLUSH_CNTL 0x04
+#define RX_FLUSH (1 << 0)
+
+#define TX_FLUSH_CNTL 0x08
+#define TX_FLUSH (1 << 0)
+
+#define MISC_CNTL 0x0c
+#define SYS_CLK_SEL (1 << 0)
+#define TDMA_EOP_SEL (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET 0x200
+#define SYS_PORT_INTRL2_1_OFFSET 0x240
+#define INTRL2_CPU_STATUS 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0c
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
+#define INTRL2_0_MPD (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
+#define INTRL2_0_RDMA_MBDONE (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
+#define INTRL2_0_TX_RING_FULL (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET 0x300
+
+#define RXCHK_CONTROL 0x00
+#define RXCHK_EN (1 << 0)
+#define RXCHK_SKIP_FCS (1 << 1)
+#define RXCHK_BAD_CSUM_DIS (1 << 2)
+#define RXCHK_BRCM_TAG_EN (1 << 3)
+#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
+#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
+#define RXCHK_PARSE_TNL (1 << 12)
+#define RXCHK_VIOL_EN (1 << 13)
+#define RXCHK_VIOL_DIS (1 << 14)
+#define RXCHK_INCOM_PKT (1 << 15)
+#define RXCHK_V6_DUPEXT_EN (1 << 16)
+#define RXCHK_V6_DUPEXT_DIS (1 << 17)
+#define RXCHK_ETHERTYPE_DIS (1 << 18)
+#define RXCHK_L2_HDR_DIS (1 << 19)
+#define RXCHK_L3_HDR_DIS (1 << 20)
+#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
+#define RXCHK_PARSE_AUTH (1 << 22)
+
+#define RXCHK_BRCM_TAG0 0x04
+#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK 0x24
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
+#define RXCHK_ETHERTYPE 0x48
+#define RXCHK_BAD_CSUM_CNTR 0x4C
+#define RXCHK_OTHER_DISC_CNTR 0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET 0x380
+#define TXCHK_PKT_RDY_THRESH 0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET 0x400
+
+#define RBUF_CONTROL 0x00
+#define RBUF_RSB_EN (1 << 0)
+#define RBUF_4B_ALGN (1 << 1)
+#define RBUF_BRCM_TAG_STRIP (1 << 2)
+#define RBUF_BAD_PKT_DISC (1 << 3)
+#define RBUF_RESUME_THRESH_SHIFT 4
+#define RBUF_RESUME_THRESH_MASK 0xff
+#define RBUF_OK_TO_SEND_SHIFT 12
+#define RBUF_OK_TO_SEND_MASK 0xff
+#define RBUF_CRC_REPLACE (1 << 20)
+#define RBUF_OK_TO_SEND_MODE (1 << 21)
+#define RBUF_RSB_SWAP (1 << 22)
+#define RBUF_ACPI_EN (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH 0x04
+
+#define RBUF_STATUS 0x08
+#define RBUF_WOL_MODE (1 << 0)
+#define RBUF_MPD (1 << 1)
+#define RBUF_ACPI (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR 0x0c
+#define RBUF_ERR_PKT_CNTR 0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET 0x600
+
+#define TBUF_CONTROL 0x00
+#define TBUF_BP_EN (1 << 0)
+#define TBUF_MAX_PKT_THRESH_SHIFT 1
+#define TBUF_MAX_PKT_THRESH_MASK 0x1f
+#define TBUF_FULL_THRESH_SHIFT 8
+#define TBUF_FULL_THRESH_MASK 0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET 0x800
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and
+ * then between the end of TX stats and the beginning of the RX RUNT stats
+ */
+#define UMAC_MIB_STAT_OFFSET 0xc
+
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RX_CNT_RST (1 << 0)
+#define MIB_RUNT_CNT_RST (1 << 1)
+#define MIB_TX_CNT_RST (1 << 2)
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET 0x2000
+
+#define RDMA_CONTROL 0x1000
+#define RDMA_EN (1 << 0)
+#define RDMA_RING_CFG (1 << 1)
+#define RDMA_DISC_EN (1 << 2)
+#define RDMA_BUF_DATA_OFFSET_SHIFT 4
+#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
+
+#define RDMA_STATUS 0x1004
+#define RDMA_DISABLED (1 << 0)
+#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
+#define RDMA_BP_STATUS (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE 0x1008
+
+#define RDMA_RING_BUF_SIZE 0x100c
+#define RDMA_RING_SIZE_SHIFT 16
+
+#define RDMA_WRITE_PTR_HI 0x1010
+#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_PROD_INDEX 0x1018
+#define RDMA_PROD_INDEX_MASK 0xffff
+
+#define RDMA_CONS_INDEX 0x101c
+#define RDMA_CONS_INDEX_MASK 0xffff
+
+#define RDMA_START_ADDR_HI 0x1020
+#define RDMA_START_ADDR_LO 0x1024
+#define RDMA_END_ADDR_HI 0x1028
+#define RDMA_END_ADDR_LO 0x102c
+
+#define RDMA_MBDONE_INTR 0x1030
+#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_TIMEOUT_SHIFT 16
+#define RDMA_TIMEOUT_MASK 0xffff
+
+#define RDMA_XON_XOFF_THRESH 0x1034
+#define RDMA_XON_XOFF_THRESH_MASK 0xffff
+#define RDMA_XOFF_THRESH_SHIFT 16
+
+#define RDMA_READ_PTR_HI 0x1038
+#define RDMA_READ_PTR_LO 0x103c
+
+#define RDMA_OVERRIDE 0x1040
+#define RDMA_LE_MODE (1 << 0)
+#define RDMA_REG_MODE (1 << 1)
+
+#define RDMA_TEST 0x1044
+#define RDMA_TP_OUT_SEL (1 << 0)
+#define RDMA_MEM_SEL (1 << 1)
+
+#define RDMA_DEBUG 0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS 32 /* rings = queues */
+#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bit words */
+
+#define SYS_PORT_TDMA_OFFSET 0x4000
+#define TDMA_WRITE_PORT_OFFSET 0x0000
+#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
+ (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
+ (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relatives to a specific ring number */
+#define RING_HEAD_TAIL_PTR 0x00
+#define RING_HEAD_MASK 0x7ff
+#define RING_TAIL_SHIFT 11
+#define RING_TAIL_MASK 0x7ff
+#define RING_FLUSH (1 << 24)
+#define RING_EN (1 << 25)
+
+#define RING_COUNT 0x04
+#define RING_COUNT_MASK 0x7ff
+#define RING_BUFF_DONE_SHIFT 11
+#define RING_BUFF_DONE_MASK 0x7ff
+
+#define RING_MAX_HYST 0x08
+#define RING_MAX_THRESH_MASK 0x7ff
+#define RING_HYST_THRESH_SHIFT 11
+#define RING_HYST_THRESH_MASK 0x7ff
+
+#define RING_INTR_CONTROL 0x0c
+#define RING_INTR_THRESH_MASK 0x7ff
+#define RING_EMPTY_INTR_EN (1 << 15)
+#define RING_TIMEOUT_SHIFT 16
+#define RING_TIMEOUT_MASK 0xffff
+
+#define RING_PROD_CONS_INDEX 0x10
+#define RING_PROD_INDEX_MASK 0xffff
+#define RING_CONS_INDEX_SHIFT 16
+#define RING_CONS_INDEX_MASK 0xffff
+
+#define RING_MAPPING 0x14
+#define RING_QID_MASK 0x3
+#define RING_PORT_ID_SHIFT 3
+#define RING_PORT_ID_MASK 0x7
+#define RING_IGNORE_STATUS (1 << 6)
+#define RING_FAILOVER_EN (1 << 7)
+#define RING_CREDIT_SHIFT 8
+#define RING_CREDIT_MASK 0xffff
+
+#define RING_PCP_DEI_VID 0x18
+#define RING_VID_MASK 0x7ff
+#define RING_DEI (1 << 12)
+#define RING_PCP_SHIFT 13
+#define RING_PCP_MASK 0x7
+#define RING_PKT_SIZE_ADJ_SHIFT 16
+#define RING_PKT_SIZE_ADJ_MASK 0xf
+
+#define TDMA_DESC_RING_SIZE 28
+
+/* Definition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
+ ((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+ (TDMA_DESC_RING_BASE(i) + \
+ RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL 0x600
+#define TDMA_EN (1 << 0)
+#define TSB_EN (1 << 1)
+#define TSB_SWAP (1 << 2)
+#define ACB_ALGO (1 << 3)
+#define BUF_DATA_OFFSET_SHIFT 4
+#define BUF_DATA_OFFSET_MASK 0x3ff
+#define VLAN_EN (1 << 14)
+#define SW_BRCM_TAG (1 << 15)
+#define WNC_KPT_SIZE_UPDATE (1 << 16)
+#define SYNC_PKT_SIZE (1 << 17)
+#define ACH_TXDONE_DELAY_SHIFT 18
+#define ACH_TXDONE_DELAY_MASK 0xff
+
+#define TDMA_STATUS 0x604
+#define TDMA_DISABLED (1 << 0)
+#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE 0x608
+#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS 0x610
+#define TDMA_TPID 0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
+#define TDMA_FREE_HEAD_MASK 0x7ff
+#define TDMA_FREE_TAIL_SHIFT 11
+#define TDMA_FREE_TAIL_MASK 0x7ff
+
+#define TDMA_FREE_LIST_COUNT 0x61c
+#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
+
+#define TDMA_TIER2_ARB_CTRL 0x620
+#define TDMA_ARB_MODE_RR 0
+#define TDMA_ARB_MODE_WEIGHT_RR 0x1
+#define TDMA_ARB_MODE_STRICT 0x2
+#define TDMA_ARB_MODE_DEFICIT_RR 0x3
+#define TDMA_CREDIT_SHIFT 4
+#define TDMA_CREDIT_MASK 0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL 0x624
+#define TDMA_ARB_EN (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
+#define TDMA_TIER1_ARB_1_CTRL 0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
+#define TDMA_TIER1_ARB_2_CTRL 0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
+#define TDMA_TIER1_ARB_3_CTRL 0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
+#define TDMA_LE_MODE (1 << 0)
+#define TDMA_REG_MODE (1 << 1)
+
+#define TDMA_TEST 0x648
+#define TDMA_TP_OUT_SEL (1 << 0)
+#define TDMA_MEM_TM (1 << 1)
+
+#define TDMA_DEBUG 0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+ u32 addr_status_len;
+ u32 addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS 1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC 1536
+
+#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group */
+struct bcm_sysport_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64-byte packets */
+ u32 cnt_127; /* RO Rx/Tx 65-127 bytes packets */
+ u32 cnt_255; /* RO Rx/Tx 128-255 bytes packets */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packets */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packets */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packets */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packets */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packets */
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packets */
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packets */
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count */
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Received broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt */
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt */
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt */
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 rcrc; /* RO (0x470) # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt count */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited excessive deferral pkt */
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt */
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt */
+ u32 frg; /* RO # of xmited fragments pkt */
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count */
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+ struct bcm_sysport_rx_counters rx;
+ struct bcm_sysport_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rxchk_bad_csum;
+ u32 rxchk_other_pkt_disc;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+ BCM_SYSPORT_STAT_NETDEV = -1,
+ BCM_SYSPORT_STAT_MIB_RX,
+ BCM_SYSPORT_STAT_MIB_TX,
+ BCM_SYSPORT_STAT_RUNT,
+ BCM_SYSPORT_STAT_RXCHK,
+ BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RXCHK, \
+ .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RBUF, \
+ .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcm_sysport_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+/* Software housekeeping helper structure */
+struct bcm_sysport_cb {
+ struct sk_buff *skb; /* SKB for RX packets */
+ void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+ spinlock_t lock; /* Ring lock for tx reclaim/xmit */
+ struct napi_struct napi; /* NAPI per tx queue */
+ dma_addr_t desc_dma; /* DMA cookie */
+ unsigned int index; /* Ring index */
+ unsigned int size; /* Ring current size */
+ unsigned int alloc_size; /* Ring one-time allocated size */
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+ unsigned int p_index; /* Current producer index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+ void __iomem *base;
+ u32 irq0_stat;
+ u32 irq0_mask;
+ u32 irq1_stat;
+ u32 irq1_mask;
+ struct napi_struct napi ____cacheline_aligned;
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ int irq0;
+ int irq1;
+
+ /* Transmit rings */
+ struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+ /* Receive queue */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ unsigned int rx_bd_assign_index;
+ struct bcm_sysport_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* PHY device */
+ struct device_node *phy_dn;
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ /* Misc fields */
+ unsigned int rx_csum_en:1;
+ unsigned int tsb_en:1;
+ unsigned int crc_fwd:1;
+ u16 rev;
+
+ /* MIB related fields */
+ struct bcm_sysport_mib mib;
+
+ /* Ethtool */
+ u32 msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
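
The STAT_* macros defined above are meant to feed a single table that the driver's ethtool string and statistics callbacks iterate over. A minimal sketch of such a table follows; the entry names are illustrative rather than the driver's exact table, and RXCHK_BAD_CSUM_CNTR is a placeholder register offset:

/* Hypothetical stats table built from the STAT_* macros above.
 * STAT_NETDEV entries resolve against struct net_device_stats,
 * the others against struct bcm_sysport_priv via offsetof().
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	/* reg_offset lets misc counters be read back from the HW block */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
};
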
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0297a79a38e1..05c6af6c418f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
return -ENOMEM;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->irq = core->irq;
- SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+ net_dev->ethtool_ops = &bgmac_ethtool_ops;
bgmac = netdev_priv(net_dev);
bgmac->net_dev = net_dev;
bgmac->core = core;
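
This is the first of many identical conversions in this series. SET_ETHTOOL_OPS() was a one-line wrapper, roughly the following, so open-coding the assignment loses nothing:

/* Approximate definition of the macro being retired; with it gone,
 * drivers assign net_dev->ethtool_ops directly, as above.
 */
#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
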
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0ab83708b6a1..67d2b0047371 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6916,8 +6916,8 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
}
else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
spin_unlock_bh(&bp->phy_lock);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4d8f8aba0ea5..4cab09d3f807 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dd57c7c5a3da..47c5814114e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
+ /* A rmb() is required to ensure that the CQE is not read
+ * before it is written by the adapter DMA. PCI ordering
+ * rules will make sure the other fields are written before
+ * the marker at the end of struct eth_fast_path_rx_cqe
+ * but without rmb() a weakly ordered processor can process
+ * stale data. Without the barrier the TPA state machine might
+ * enter an inconsistent state and the kernel stack might be
+ * provided with an incorrect packet description - these lead
+ * to various kernel crashes.
+ */
+ rmb();
+
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
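
The ordering hazard described in the new comment is generic to DMA completion rings. A minimal consumer-side sketch, with illustrative names, shows where the barrier sits:

/* Sketch: the device DMA writes the payload first and the marker last;
 * the CPU must read the marker first and fence before touching the
 * payload, or a weakly ordered core may load stale fields.
 */
struct fake_cqe {
	u32 data;	/* written by the device first */
	u8 marker;	/* written by the device last */
};

static bool fake_consume(struct fake_cqe *cqe, u32 *out)
{
	if (!READ_ONCE(cqe->marker))
		return false;	/* not produced yet */
	rmb();			/* order marker read before payload reads */
	*out = cqe->data;
	return true;
}
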
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3448cc033ca5..571427c7226b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 97ea5421dd96..51a952c51cb1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 804b8f64463e..c6939ecb02c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index b6de05e3149b..bd0600cf7266 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
- * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * The same as in bnx2x_get_rxfh: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
* while indir->ring_index points to an array of u32.
*
@@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3498,16 +3499,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- if (IS_PF(bp))
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
- else /* vf */
- SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+ netdev->ethtool_ops = (IS_PF(bp)) ?
+ &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
}
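
The get_rxfh_indir/set_rxfh_indir to get_rxfh/set_rxfh renames here and throughout the series track an ethtool core change that folds the RSS hash key into the same operation. A table-only driver simply ignores the new key argument; an illustrative shape, with foo_indir_table and its size as placeholders:

/* Sketch of a table-only get_rxfh: the same indirection-table copy as
 * the old get_rxfh_indir, with the (unused) hash-key parameter added.
 */
static u32 foo_indir_table[128];

static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
	int i;

	for (i = 0; i < 128; i++)
		indir[i] = foo_indir_table[i];
	/* key is left untouched: this device has no readable hash key */
	return 0;
}
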
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae164fce..8aafd9b5d6a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c2dfea7968f4..bd90e50bd8e6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd900960..5669ed2e87d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9b6b3d7304b6..53fb4fa61b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2218,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
*/
u32 val;
struct bnx2x *bp = params->bp;
- int bnx2x_status = 0;
u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2232,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
- return bnx2x_status;
+ return 0;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
@@ -2246,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
- return bnx2x_status;
+ return 0;
}
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2260,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
- return bnx2x_status;
+ return 0;
}
static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3703,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 lane, i, cl72_ctrl, an_adv = 0;
+ u16 lane, i, cl72_ctrl, an_adv = 0, val;
+ u32 wc_lane_config;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3822,15 +3822,27 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Enable Auto-Detect to support 1G over CL37 as well */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
-
+ wc_lane_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
/* Force cl48 sync_status LOW to avoid getting stuck in CL73
* parallel-detect loop when CL73 and CL37 are enabled.
*/
- CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0);
+ val |= 1 << 11;
+
+ /* Restore Polarity settings in case it was run over by
+ * previous link owner
+ */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
- bnx2x_set_aer_mmd(params, phy);
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
bnx2x_disable_kr2(params, vars, phy);
}
@@ -6473,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
static int bnx2x_link_initialize(struct link_params *params,
struct link_vars *vars)
{
- int rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
/* In case of external phy existence, the line speed would be the
@@ -6546,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- return rc;
+ return 0;
}
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -12461,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
u32 dont_clear_stat, lfa_sts;
struct bnx2x *bp = params->bp;
+ bnx2x_set_mdio_emac_per_phy(bp, params);
/* Sync the link parameters */
bnx2x_link_status_update(params, vars);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3a8e51ed5bec..2887034523e0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -10053,6 +10053,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+ /* UNDI marks its presence in DORQ -
+ * it initializes CID offset for normal bell to 0x7
+ */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+ return false;
+
+ if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ return true;
+ }
+
+ return false;
+}
+
static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
{
u8 major, minor, version;
@@ -10302,6 +10320,10 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Path is unmarked\n");
+ /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+ if (bnx2x_prev_is_after_undi(bp))
+ goto out;
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
@@ -10322,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Could not FLR\n");
+out:
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
if (!rc)
@@ -10360,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* close LLH filters towards the BRB */
bnx2x_set_rx_filter(&bp->link_params, 0);
- /* Check if the UNDI driver was previously loaded
- * UNDI driver initializes CID offset for normal bell to 0x7
- */
- if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
- if (tmp_reg == 0x7) {
- BNX2X_DEV_INFO("UNDI previously loaded\n");
- prev_undi = true;
- /* clear the UNDI indication */
- REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
- /* clear possible idle check errors */
- REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
- }
+ /* Check if the UNDI driver was previously loaded */
+ if (bnx2x_prev_is_after_undi(bp)) {
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
if (!CHIP_IS_E1x(bp))
/* block FW from writing to host */
@@ -13283,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
- cancel_delayed_work(&bp->sp_task);
- cancel_delayed_work(&bp->period_task);
+ cancel_delayed_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->period_task);
spin_lock_bh(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d725317c4277..b1936044767a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 80f6c790ed88..718ecd294661 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index faf01488d26e..eda8583f6fc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*
*/
#include "bnx2x.h"
@@ -1071,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the VF doorbell threshold */
- REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+ /* set the VF doorbell threshold. This threshold represents the number
+ * of doorbells allowed in the main DORQ FIFO for a specific VF.
+ */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
@@ -2576,7 +2578,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->vf = vfidx;
ivi->qos = 0;
- ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->min_tx_rate = 0;
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 6929adba52f9..96c575e147a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef BNX2X_SRIOV_H
#define BNX2X_SRIOV_H
@@ -571,7 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return NULL;
}
-static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; }
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 3b75070411aa..ca47665f94bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index f35845006cdd..2beceaefdeea 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 784c7155b98a..d712d0ddd719 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#include "bnx2x.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index c922b81170e5..e21e706762c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4dd48d2fa804..8244e2b14bb4 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
+
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_unlock(&cnic_lock);
- if (ulp_type == CNIC_ULP_ISCSI)
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
- else if (ulp_type == CNIC_ULP_FCOE)
+ if (ulp_type == CNIC_ULP_FCOE)
dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev;
- read_lock(&cnic_dev_lock);
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
if (__cnic_alloc_uio_rings(udev, pages)) {
udev->dev = NULL;
- read_unlock(&cnic_dev_lock);
return -ENOMEM;
}
cp->udev = udev;
- read_unlock(&cnic_dev_lock);
return 0;
}
}
- read_unlock(&cnic_dev_lock);
udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
if (__cnic_alloc_uio_rings(udev, pages))
goto err_udev;
- write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
- write_unlock(&cnic_dev_lock);
pci_dev_get(udev->pdev);
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
{
int if_type;
- rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
void *ctx;
- ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
- if (!ulp_ops || !ulp_ops->indicate_netevent)
+ mutex_lock(&cnic_lock);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
+ if (!ulp_ops || !ulp_ops->indicate_netevent) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
ctx = cp->ulp_handle[if_type];
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
+
ulp_ops->indicate_netevent(ctx, event, vlan_id);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
- rcu_read_unlock();
}
/* netdev event handler */
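
The cnic_rcv_netevent() rework above swaps RCU for a mutex plus a ULP_F_CALL_PENDING flag, so the callback can run (and potentially sleep) with no lock held while an unregister path can still wait out an in-flight call. The distilled pattern, with illustrative names:

/* Sketch: publish "call in flight" under the lock, invoke unlocked,
 * clear afterwards; a teardown path can wait on the pending bit.
 */
#define CALL_PENDING 0

struct notifier_slot {
	void (*event)(void *ctx, unsigned long ev);
	void *ctx;
	unsigned long flags;
};

static void notify_slot(struct mutex *lock, struct notifier_slot *slot,
			unsigned long ev)
{
	void (*fn)(void *ctx, unsigned long ev);
	void *ctx;

	mutex_lock(lock);
	fn = slot->event;
	ctx = slot->ctx;
	if (!fn) {
		mutex_unlock(lock);
		return;
	}
	set_bit(CALL_PENDING, &slot->flags);
	mutex_unlock(lock);

	fn(ctx, ev);	/* may sleep; no lock held */

	clear_bit(CALL_PENDING, &slot->flags);
}
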
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd04375f..5ba1cfbd60da 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
dev->watchdog_timeo = 2 * HZ;
- SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4608673beaff..add8d8596084 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
static int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
unsigned int phy_flags;
int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
return 0;
}
- if (priv->phy_dn)
- phydev = of_phy_connect(dev, priv->phy_dn,
- bcmgenet_mii_setup, 0,
- priv->phy_interface);
- else
- phydev = of_phy_connect_fixed_link(dev,
- bcmgenet_mii_setup,
- priv->phy_interface);
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = dn;
+ }
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
+ priv->phy_interface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e5d95c5ce1ad..df2792d8383d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2013 Broadcom Corporation.
+ * Copyright (C) 2005-2014 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 136
+#define TG3_MIN_NUM 137
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jan 03, 2014"
+#define DRV_MODULE_RELDATE "May 11, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
return 0;
}
-#define NVRAM_CMD_TIMEOUT 10000
+#define NVRAM_CMD_TIMEOUT 100
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
return NETDEV_TX_OK;
}
-/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
- * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
- */
+/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tg3_napi *tnapi;
struct netdev_queue *txq;
unsigned int last;
+ struct iphdr *iph = NULL;
+ struct tcphdr *tcph = NULL;
+ __sum16 tcp_csum = 0, ip_csum = 0;
+ __be16 ip_tot_len = 0;
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_shinfo(skb)->gso_size;
if (mss) {
- struct iphdr *iph;
u32 tcp_opt_len, hdr_len;
if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
if (!skb_is_gso_v6(skb)) {
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+ tg3_flag(tp, TSO_BUG))
+ return tg3_tso_bug(tp, skb);
+
+ ip_csum = iph->check;
+ ip_tot_len = iph->tot_len;
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
}
- if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- tg3_flag(tp, TSO_BUG))
- return tg3_tso_bug(tp, skb);
-
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
+ tcph = tcp_hdr(skb);
+ tcp_csum = tcph->check;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3)) {
- tcp_hdr(skb)->check = 0;
+ tcph->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
- } else
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ } else {
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
if (tg3_flag(tp, HW_TSO_3)) {
mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (would_hit_hwbug) {
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ if (mss) {
+ /* If it's a TSO packet, do GSO instead of
+ * allocating and copying to a large linear SKB
+ */
+ if (ip_tot_len) {
+ iph->check = ip_csum;
+ iph->tot_len = ip_tot_len;
+ }
+ tcph->check = tcp_csum;
+ return tg3_tso_bug(tp, skb);
+ }
+
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
- int ret;
+ int ret, cpmu_restore = 0;
u8 *pd;
- u32 i, offset, len, b_offset, b_count;
+ u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
__be32 val;
if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
eeprom->magic = TG3_EEPROM_MAGIC;
+ /* Override clock, link aware and link idle modes */
+ if (tg3_flag(tp, CPMU_PRESENT)) {
+ cpmu_val = tr32(TG3_CPMU_CTRL);
+ if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
+ CPMU_CTRL_LINK_IDLE_MODE)) {
+ tw32(TG3_CPMU_CTRL, cpmu_val &
+ ~(CPMU_CTRL_LINK_AWARE_MODE |
+ CPMU_CTRL_LINK_IDLE_MODE));
+ cpmu_restore = 1;
+ }
+ }
+ tg3_override_clk(tp);
+
if (offset & 3) {
/* adjustments to start on required 4 byte boundary */
b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
}
ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
if (ret)
- return ret;
+ goto eeprom_done;
memcpy(data, ((char *)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
for (i = 0; i < (len - (len & 3)); i += 4) {
ret = tg3_nvram_read_be32(tp, offset + i, &val);
if (ret) {
+ if (i)
+ i -= 4;
eeprom->len += i;
- return ret;
+ goto eeprom_done;
}
memcpy(pd + i, &val, 4);
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ eeprom->len += i;
+ ret = -EINTR;
+ goto eeprom_done;
+ }
+ cond_resched();
+ }
}
eeprom->len += i;
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
b_offset = offset + len - b_count;
ret = tg3_nvram_read_be32(tp, b_offset, &val);
if (ret)
- return ret;
+ goto eeprom_done;
memcpy(pd, &val, b_count);
eeprom->len += b_count;
}
- return 0;
+ ret = 0;
+
+eeprom_done:
+ /* Restore clock, link aware and link idle modes */
+ tg3_restore_clk(tp);
+ if (cpmu_restore)
+ tw32(TG3_CPMU_CTRL, cpmu_val);
+
+ return ret;
}
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
@@ -12484,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct tg3 *tp = netdev_priv(dev);
int i;
@@ -12495,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -14027,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_sset_count = tg3_get_sset_count,
.get_rxnfc = tg3_get_rxnfc,
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
- .get_rxfh_indir = tg3_get_rxfh_indir,
- .set_rxfh_indir = tg3_set_rxfh_indir,
+ .get_rxfh = tg3_get_rxfh,
+ .set_rxfh = tg3_set_rxfh,
.get_channels = tg3_get_channels,
.set_channels = tg3_set_channels,
.get_ts_info = tg3_get_ts_info,
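
Two patterns in the tg3 hunks above are worth noting. The TSO path now saves iph->check, iph->tot_len and tcph->check before mangling them so the headers can be restored for the tg3_tso_bug() GSO fallback; and the EEPROM read loop now yields and honors signals during long NVRAM dumps. The fragment below restates the latter pattern, with read_word() as a placeholder reader:

/* Sketch of the long-read loop: cond_resched() when the scheduler
 * asks for the CPU back, -EINTR on a pending signal.
 */
for (i = 0; i < len; i += 4) {
	ret = read_word(offset + i, &val);
	if (ret)
		break;
	if (need_resched()) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		cond_resched();
	}
}
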
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 04321e5a356e..461accaf0aa4 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2013 Broadcom Corporation.
+ * Copyright (C) 2007-2014 Broadcom Corporation.
*/
#ifndef _T3_H
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index f9e150825bb5..882cad71ad62 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -266,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->transceiver = XCVR_EXTERNAL;
cmd->maxtxpkt = 0;
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+ netdev->ethtool_ops = &bnad_ethtool_ops;
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 521dfea44b83..25d6b2a10e4e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
ether_setup(ndev);
ndev->netdev_ops = &xgmac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ ndev->ethtool_ops = &xgmac_ethtool_ops;
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 05613a85ce61..186566bfdbc8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -580,8 +580,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
- SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+ netdev->ethtool_ops = &t1_ethtool_ops;
}
if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 07bbb711b7e5..5d9cce053cc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1809,8 +1809,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index c0a9dd55f4e5..b0cbb2b7fd48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
+ dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
} else if (netif_is_bond_slave(dev)) {
struct net_device *upper_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
+ MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
+ MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
- MAX_EGRQ = 128, /* max # of egress queues, including FLs */
- MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
+ INGQ_EXTRAS = 2, /* firmware event queue and */
+ /* forwarded interrupts */
+ MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+ + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
+ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
struct adapter;
@@ -538,6 +544,7 @@ struct sge {
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+ struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active offload queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
+ u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 ofld_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[NCHAN];
+ u16 rdma_ciq[NCHAN];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
struct l2t_data;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3301e0..2f8d6b910383 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
for_each_rdmarxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
adap->port[0]->name, i);
+
+ for_each_rdmaciq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
+ adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
+ for_each_rdmaciq(s, rdmaciqqidx) {
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
+ &s->rdmaciq[rdmaciqqidx].rspq);
+ if (err)
+ goto unwind;
+ msi_index++;
+ }
return 0;
unwind:
+ while (--rdmaciqqidx >= 0)
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->rdmaciq[rdmaciqqidx].rspq);
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
+ for_each_rdmaciq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
- &q->fl, uldrx_handler);
+ q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, &q->fl, uldrx_handler);
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
s->rdma_rxq[i] = q->rspq.abs_id;
}
+ for_each_rdmaciq(s, i) {
+ struct sge_ofld_rxq *q = &s->rdmaciq[i];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->rdma_ciq[i] = q->rspq.abs_id;
+ }
+
for_each_port(adap, i) {
/*
* Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2252,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
cmd->port = PORT_DA;
else
- cmd->port = PORT_FIBRE;
+ cmd->port = PORT_OTHER;
} else
cmd->port = PORT_OTHER;
@@ -2461,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
}
/**
- * set_rxq_intr_params - set a queue's interrupt holdoff parameters
- * @adap: the adapter
+ * set_rspq_intr_params - set a queue's interrupt holdoff parameters
* @q: the Rx queue
* @us: the hold-off time in us, or 0 to disable timer
* @cnt: the hold-off packet count, or 0 to disable counter
@@ -2470,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
* Sets an Rx queue's interrupt hold-off time and packet count. At least
* one of the two needs to be enabled for the queue to generate interrupts.
*/
-static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
- unsigned int us, unsigned int cnt)
+static int set_rspq_intr_params(struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
{
+ struct adapter *adap = q->adap;
+
if ((us | cnt) == 0)
cnt = 1;
@@ -2499,24 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
return 0;
}
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+/**
+ * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+ unsigned int us, unsigned int cnt)
{
- const struct port_info *pi = netdev_priv(dev);
+ int i, err;
+ struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
- int i;
- int r = 0;
-
- for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
- q = &adap->sge.ethrxq[i].rspq;
- r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
- c->rx_max_coalesced_frames);
- if (r) {
- dev_err(&dev->dev, "failed to set coalesce %d\n", r);
- break;
- }
+ struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = 0; i < pi->nqsets; i++, q++) {
+ err = set_rspq_intr_params(&q->rspq, us, cnt);
+ if (err)
+ return err;
}
- return r;
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -2732,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p)
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
@@ -2742,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p)
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p)
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@@ -2844,8 +2897,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
- .get_rxfh_indir = get_rss_table,
- .set_rxfh_indir = set_rss_table,
+ .get_rxfh = get_rss_table,
+ .set_rxfh = set_rss_table,
.flash_device = set_flash,
};
@@ -3386,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
EXPORT_SYMBOL(cxgb4_best_mtu);
/**
+ * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
+ * @mtus: the HW MTU table
+ * @header_size: Header Size
+ * @data_size_max: maximum Data Segment Size
+ * @data_size_align: desired Data Segment Size Alignment (2^N)
+ * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
+ *
+ * Similar to cxgb4_best_mtu() but instead of searching the Hardware
+ * MTU Table based solely on a Maximum MTU parameter, we break that
+ * parameter up into a Header Size and Maximum Data Segment Size, and
+ * provide a desired Data Segment Size Alignment. If we find an MTU in
+ * the Hardware MTU Table which will result in a Data Segment Size with
+ * the requested alignment _and_ that MTU isn't "too far" from the
+ * closest MTU, then we'll return that rather than the closest MTU.
+ */
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+ unsigned short header_size,
+ unsigned short data_size_max,
+ unsigned short data_size_align,
+ unsigned int *mtu_idxp)
+{
+ unsigned short max_mtu = header_size + data_size_max;
+ unsigned short data_size_align_mask = data_size_align - 1;
+ int mtu_idx, aligned_mtu_idx;
+
+ /* Scan the MTU Table till we find an MTU which is larger than our
+ * Maximum MTU or we reach the end of the table. Along the way,
+ * record the last MTU found, if any, which will result in a Data
+ * Segment Length matching the requested alignment.
+ */
+ for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
+ unsigned short data_size = mtus[mtu_idx] - header_size;
+
+ /* If this MTU minus the Header Size would result in a
+ * Data Segment Size of the desired alignment, remember it.
+ */
+ if ((data_size & data_size_align_mask) == 0)
+ aligned_mtu_idx = mtu_idx;
+
+ /* If we're not at the end of the Hardware MTU Table and the
+ * next element is larger than our Maximum MTU, drop out of
+ * the loop.
+ */
+ if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
+ break;
+ }
+
+ /* If we fell out of the loop because we ran to the end of the table,
+ * then we just have to use the last [largest] entry.
+ */
+ if (mtu_idx == NMTUS)
+ mtu_idx--;
+
+ /* If we found an MTU which resulted in the requested Data Segment
+ * Length alignment and that's "not far" from the largest MTU which is
+ * less than or equal to the maximum MTU, then use that.
+ */
+ if (aligned_mtu_idx >= 0 &&
+ mtu_idx - aligned_mtu_idx <= 1)
+ mtu_idx = aligned_mtu_idx;
+
+ /* If the caller has passed in an MTU Index pointer, pass the
+ * MTU Index back. Return the MTU value.
+ */
+ if (mtu_idxp)
+ *mtu_idxp = mtu_idx;
+ return mtus[mtu_idx];
+}
+EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
+
+/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
@@ -3782,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.mtus = adap->params.mtus;
if (uld == CXGB4_ULD_RDMA) {
lli.rxq_ids = adap->sge.rdma_rxq;
+ lli.ciq_ids = adap->sge.rdma_ciq;
lli.nrxq = adap->sge.rdmaqs;
+ lli.nciq = adap->sge.rdmaciqs;
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.ofld_rxq;
lli.nrxq = adap->sge.ofldqsets;
@@ -4061,7 +4187,7 @@ static int update_root_dev_clip(struct net_device *dev)
/* Parse all bond and vlan devices layered on top of the physical dev */
for (i = 0; i < VLAN_N_VID; i++) {
- root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+ root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
if (!root_dev)
continue;
@@ -5528,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
- /*
- * These are finalized by FW initialization, load their values now.
+ /* The MTU/MSS Table is initialized by now, so load their values. If
+ * we're initializing the adapter, then we'll make any modifications
+ * we want to the MTU/MSS Table and also initialize the congestion
+ * parameters.
*/
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
- t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
- adap->params.b_wnd);
+ if (state != DEV_STATE_INIT) {
+ int i;
+
+ /* The default MTU Table contains values 1492 and 1500.
+ * However, for TCP, it's better to have two values which are
+ * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+ * This allows us to have a TCP Data Payload which is a
+ * multiple of 8 regardless of what combination of TCP Options
+ * are in use (always a multiple of 4 bytes) which is
+ * important for performance reasons. For instance, if no
+ * options are in use, then we have a 20-byte IP header and a
+ * 20-byte TCP header. In this case, a 1500-byte MSS would
+ * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+ * which is not a multiple of 8. So using an MSS of 1488 in
+ * this case results in a TCP Data Payload of 1448 bytes which
+ * is a multiple of 8. On the other hand, if 12-byte TCP Time
+ * Stamps have been negotiated, then an MTU of 1500 bytes
+ * results in a TCP Data Payload of 1448 bytes which, as
+ * above, is a multiple of 8 bytes ...
+ */
+ for (i = 0; i < NMTUS; i++)
+ if (adap->params.mtus[i] == 1492) {
+ adap->params.mtus[i] = 1488;
+ break;
+ }
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ }
t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
@@ -5669,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
-static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
- q->intr_params = QINTR_TIMER_IDX(timer_idx) |
- (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
- q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+ q->adap = adap;
+ set_rspq_intr_params(q, us, cnt);
q->iqe_len = iqe_size;
q->size = size;
}
@@ -5688,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
+ int ciq_size;
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5726,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
+ s->rdmaciqs = adap->params.nports;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
- init_rspq(&r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
r->fl.size = 72;
}
@@ -5747,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
struct sge_ofld_rxq *r = &s->ofldrxq[i];
- init_rspq(&r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
r->fl.size = 72;
}
@@ -5755,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
- init_rspq(&r->rspq, 0, 0, 511, 64);
+ init_rspq(adap, &r->rspq, 5, 1, 511, 64);
r->rspq.uld = CXGB4_ULD_RDMA;
r->fl.size = 72;
}
- init_rspq(&s->fw_evtq, 6, 0, 512, 64);
- init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ CH_WARN(adap, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
+ struct sge_ofld_rxq *r = &s->rdmaciq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+ r->rspq.uld = CXGB4_ULD_RDMA;
+ }
+
+ init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
/*
@@ -5808,9 +5977,9 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->ofldqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
/* need nchan for each possible ULD */
- ofld_need = 2 * nchan;
+ ofld_need = 3 * nchan;
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
@@ -6076,7 +6245,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
}
pci_set_drvdata(pdev, adapter);
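
The 1492 -> 1488 substitution in adap_init0() above is easiest to see with the arithmetic written out. A small standalone check, using the header sizes from the comment (20-byte IPv4 plus 20-byte TCP, no options):

/* Worked example of the alignment argument: payload = MTU - 40. */
#include <stdio.h>

int main(void)
{
	const int hdr = 20 + 20;	/* IPv4 + TCP, no options */
	const int mtus[] = { 1500, 1492, 1488 };

	for (int i = 0; i < 3; i++) {
		int payload = mtus[i] - hdr;
		printf("MTU %d -> payload %d (%% 8 = %d)\n",
		       mtus[i], payload, payload % 8);
	}
	/* prints: 1500 -> 1460 (4), 1492 -> 1452 (4), 1488 -> 1448 (0);
	 * with 12-byte TCP timestamps, MTU 1500 also yields 1448.
	 */
	return 0;
}
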
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..55e9daf7f9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
const struct cxgb4_virt_res *vr; /* assorted HW resources */
const unsigned short *mtus; /* MTU table */
const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
+ const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
unsigned short nrxq; /* # of Rx queues */
unsigned short ntxq; /* # of Tx queues */
+ unsigned short nciq; /* # of concentrator IQs */
unsigned char nchan:4; /* # of channels */
unsigned char nports:4; /* # of ports */
unsigned char wr_cred; /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+ unsigned short header_size,
+ unsigned short data_size_max,
+ unsigned short data_size_align,
+ unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
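cxgb4_best_aligned_mtu() extends the existing cxgb4_best_mtu() table lookup with an alignment constraint on the data payload, which is what the MTU-table tweak above prepares for. A rough sketch of the selection logic, illustrative only (assumes a sorted table and a non-zero data_size_align; the in-tree implementation differs in detail):

	static unsigned int best_aligned_mtu_sketch(const unsigned short *mtus,
						    unsigned short header_size,
						    unsigned short data_size_max,
						    unsigned short data_size_align,
						    unsigned int *mtu_idxp)
	{
		unsigned short max_mtu = header_size + data_size_max;
		int i, best = -1;

		/* remember the largest fitting MTU whose payload is aligned */
		for (i = 0; i < NMTUS && mtus[i] && mtus[i] <= max_mtu; i++)
			if ((mtus[i] - header_size) % data_size_align == 0)
				best = i;

		if (best < 0)			/* no aligned entry: largest fit */
			best = i ? i - 1 : 0;
		if (mtu_idxp)
			*mtu_idxp = best;
		return mtus[best];
	}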
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e249528c8e60..dd4355d248e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
- (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
iq->size--; /* subtract status entry */
- iq->adap = adap;
iq->netdev = dev;
iq->handler = hnd;
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
+ for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+ SGE_MAX_IQ_SIZE = 65520,
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c710789..973eb11aa98a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
+#define RCV_BUFSIZ_MASK 0x3FFU
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
__be64 opt0;
};
+struct cpl_t5_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be64 opt0;
+ __be32 iss;
+ __be32 rsvd;
+};
+
struct cpl_act_open_req {
WR_HDR;
union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 52859288de7b..ff1cdd1788b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4vf_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+ netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
/*
* Initialize the hardware/software state for the port.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9d88c1d50b49..bdfa80ca5e31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt = (void *)rsp;
- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && !pkt->err_vec &&
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
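The csum_ok change, made identically in the cxgb4 and cxgb4vf handlers above, folds the NETIF_F_RXCSUM feature test into the flag itself. That matters because csum_ok also gates the GRO fast path at the top of each handler; previously only the later non-GRO branch honoured a disabled RX checksum offload. Condensed (not a drop-in fragment):

	csum_ok = pkt->csum_calc && !pkt->err_vec &&
		  (q->netdev->features & NETIF_F_RXCSUM);
	if ((pkt->l2info & htonl(RXF_TCP)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag)
		do_gro(rxq, si, pkt);			/* GRO path */
	...
	if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* regular path */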
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0202ad..14f465f239d6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_AIC_LARGE_PKT_DIFF 3
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
void *devid;
};
+/* Store only the lower range. Higher range is given by fw. */
+struct enic_intr_mod_range {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+};
+
+struct enic_intr_mod_table {
+ u32 rx_rate;
+ u32 range_percent;
+};
+
+#define ENIC_MAX_LINK_SPEEDS 3
+#define ENIC_LINK_SPEED_10G 10000
+#define ENIC_LINK_SPEED_4G 4000
+#define ENIC_LINK_40G_INDEX 2
+#define ENIC_LINK_10G_INDEX 1
+#define ENIC_LINK_4G_INDEX 0
+#define ENIC_RX_COALESCE_RANGE_END 125
+#define ENIC_AIC_TS_BREAK 100
+
+struct enic_rx_coal {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+ u32 range_end;
+ u32 use_adaptive_rx_coalesce;
+};
+
/* priv_flags */
#define ENIC_SRIOV_ENABLED (1 << 0)
@@ -85,13 +114,12 @@ struct enic {
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
- u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int flags;
unsigned int priv_flags;
unsigned int mc_count;
unsigned int uc_count;
u32 port_mtu;
+ struct enic_rx_coal rx_coalesce_setting;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 4b6e5695b263..3e27df522847 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -88,7 +88,7 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
return err;
}
-int enic_dev_add_addr(struct enic *enic, u8 *addr)
+int enic_dev_add_addr(struct enic *enic, const u8 *addr)
{
int err;
@@ -99,7 +99,7 @@ int enic_dev_add_addr(struct enic *enic, u8 *addr)
return err;
}
-int enic_dev_del_addr(struct enic *enic, u8 *addr)
+int enic_dev_del_addr(struct enic *enic, const u8 *addr)
{
int err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 129b14a4efb0..36ea1ab25f6a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -45,8 +45,8 @@ int enic_dev_add_station_addr(struct enic *enic);
int enic_dev_del_station_addr(struct enic *enic);
int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int enic_dev_add_addr(struct enic *enic, u8 *addr);
-int enic_dev_del_addr(struct enic *enic, u8 *addr);
+int enic_dev_add_addr(struct enic *enic, const u8 *addr);
+int enic_dev_del_addr(struct enic *enic, const u8 *addr);
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
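The const-ification here (and in vnic_dev.[ch] further down) is what allows enic_dev_add_addr()/enic_dev_del_addr() to be called from the new __dev_uc_sync()/__dev_mc_sync() callbacks in enic_main.c, since those callbacks receive the address as a const pointer:

	/* callback type expected by __dev_uc_sync()/__dev_mc_sync() */
	int (*sync)(struct net_device *dev, const unsigned char *addr);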
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 47e3562f4866..2e50b5489d20 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+{
+ int i;
+ int intr;
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ }
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -93,8 +104,8 @@ static int enic_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+ if (rxcoal->use_adaptive_rx_coalesce)
+ ecmd->use_adaptive_rx_coalesce = 1;
+ ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
+ ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
return 0;
}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_coalesce_usecs_high;
+ u32 coalesce_usecs_max;
unsigned int i, intr;
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
+
+ rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+ coalesce_usecs_max);
+ rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+ coalesce_usecs_max);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
intr = enic_legacy_io_intr();
vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
case VNIC_DEV_INTR_MODE_MSI:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
vnic_intr_coalescing_timer_set(&enic->intr[0],
tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
tx_coalesce_usecs);
}
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
+ if (rxcoal->use_adaptive_rx_coalesce) {
+ if (!ecmd->use_adaptive_rx_coalesce) {
+ rxcoal->use_adaptive_rx_coalesce = 0;
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ }
+ } else {
+ if (ecmd->use_adaptive_rx_coalesce)
+ rxcoal->use_adaptive_rx_coalesce = 1;
+ else
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
}
+ if (ecmd->rx_coalesce_usecs_high) {
+ if (rx_coalesce_usecs_high <
+ (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
+ }
break;
default:
break;
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
void enic_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+ netdev->ethtool_ops = &enic_ethtool_ops;
}
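With this, enic honours the adaptive-RX fields of the standard coalescing API (MSI-X only): rx_coalesce_usecs_high must exceed rx_coalesce_usecs_low by at least ENIC_AIC_LARGE_PKT_DIFF (3) microseconds, or the request fails with -EINVAL. A hypothetical userspace request, values for illustration only:

	#include <linux/ethtool.h>

	struct ethtool_coalesce ec = {
		.cmd			  = ETHTOOL_SCOALESCE,
		.use_adaptive_rx_coalesce = 1,
		.rx_coalesce_usecs_low	  = 20,	/* small-packet range start */
		.rx_coalesce_usecs_high	  = 80,	/* range end; >= low + 3 */
	};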
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718ce806..f32f828b7f3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
+#include <linux/ktime.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+#define ENIC_MAX_COALESCE_TIMERS 10
+/* Interrupt moderation table, used to decide the coalescing timer
+ * value. Each entry is {rx_rate in Mbps, percentage of the range}.
+ */
+struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+ {4000, 0},
+ {4400, 10},
+ {5060, 20},
+ {5230, 30},
+ {5540, 40},
+ {5820, 50},
+ {6120, 60},
+ {6435, 70},
+ {6745, 80},
+ {7000, 90},
+ {0xFFFFFFFF, 100}
+};
+
+/* This table helps the driver pick the range for the rx coalescing
+ * timer depending on the link speed.
+ */
+struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+ {0, 0}, /* 0 - 4 Gbps */
+ {0, 3}, /* 4 - 10 Gbps */
+ {3, 6}, /* 10 - 40 Gbps */
+};
+
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -586,8 +616,71 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
return net_stats;
}
+static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
+ unsigned int mc_count = netdev_mc_count(netdev);
+
+ netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
+ ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
+
+ return -ENOSPC;
+ }
+
+ enic_dev_add_addr(enic, mc_addr);
+ enic->mc_count++;
+
+ return 0;
+}
+
+static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ enic_dev_del_addr(enic, mc_addr);
+ enic->mc_count--;
+
+ return 0;
+}
+
+static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
+ unsigned int uc_count = netdev_uc_count(netdev);
+
+ netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
+ ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+
+ return -ENOSPC;
+ }
+
+ enic_dev_add_addr(enic, uc_addr);
+ enic->uc_count++;
+
+ return 0;
+}
+
+static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ enic_dev_del_addr(enic, uc_addr);
+ enic->uc_count--;
+
+ return 0;
+}
+
void enic_reset_addr_lists(struct enic *enic)
{
+ struct net_device *netdev = enic->netdev;
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+
enic->mc_count = 0;
enic->uc_count = 0;
enic->flags = 0;
@@ -654,112 +747,6 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
-static void enic_update_multicast_addr_list(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct netdev_hw_addr *ha;
- unsigned int mc_count = netdev_mc_count(netdev);
- u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int i, j;
-
- if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
- netdev_warn(netdev, "Registering only %d out of %d "
- "multicast addresses\n",
- ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
- mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
- }
-
- /* Is there an easier way? Trying to minimize to
- * calls to add/del multicast addrs. We keep the
- * addrs from the last call in enic->mc_addr and
- * look for changes to add/del.
- */
-
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
- }
-
- for (i = 0; i < enic->mc_count; i++) {
- for (j = 0; j < mc_count; j++)
- if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
- break;
- if (j == mc_count)
- enic_dev_del_addr(enic, enic->mc_addr[i]);
- }
-
- for (i = 0; i < mc_count; i++) {
- for (j = 0; j < enic->mc_count; j++)
- if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
- break;
- if (j == enic->mc_count)
- enic_dev_add_addr(enic, mc_addr[i]);
- }
-
- /* Save the list to compare against next time
- */
-
- for (i = 0; i < mc_count; i++)
- memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
-
- enic->mc_count = mc_count;
-}
-
-static void enic_update_unicast_addr_list(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct netdev_hw_addr *ha;
- unsigned int uc_count = netdev_uc_count(netdev);
- u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int i, j;
-
- if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
- netdev_warn(netdev, "Registering only %d out of %d "
- "unicast addresses\n",
- ENIC_UNICAST_PERFECT_FILTERS, uc_count);
- uc_count = ENIC_UNICAST_PERFECT_FILTERS;
- }
-
- /* Is there an easier way? Trying to minimize to
- * calls to add/del unicast addrs. We keep the
- * addrs from the last call in enic->uc_addr and
- * look for changes to add/del.
- */
-
- i = 0;
- netdev_for_each_uc_addr(ha, netdev) {
- if (i == uc_count)
- break;
- memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
- }
-
- for (i = 0; i < enic->uc_count; i++) {
- for (j = 0; j < uc_count; j++)
- if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
- break;
- if (j == uc_count)
- enic_dev_del_addr(enic, enic->uc_addr[i]);
- }
-
- for (i = 0; i < uc_count; i++) {
- for (j = 0; j < enic->uc_count; j++)
- if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
- break;
- if (j == enic->uc_count)
- enic_dev_add_addr(enic, uc_addr[i]);
- }
-
- /* Save the list to compare against next time
- */
-
- for (i = 0; i < uc_count; i++)
- memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
-
- enic->uc_count = uc_count;
-}
-
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
@@ -782,9 +769,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
if (!promisc) {
- enic_update_unicast_addr_list(enic);
+ __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
if (!allmulti)
- enic_update_multicast_addr_list(enic);
+ __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
}
}
@@ -979,6 +966,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -986,6 +982,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct enic *enic = vnic_dev_priv(rq->vdev);
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1053,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
napi_gro_receive(&enic->napi[q_number], skb);
else
netif_receive_skb(skb);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
} else {
/* Buffer overflow
@@ -1134,6 +1134,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
return rq_work_done;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1171,6 +1229,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Refresh the interrupt coalescing timer value based on
+ * the observed traffic. This is supported only in
+ * MSI-X mode.
+ */
+ enic_calc_int_moderation(enic, &enic->rq[rq]);
if (work_done < work_to_do) {
@@ -1179,6 +1244,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
@@ -1314,6 +1381,42 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* If the interrupt mode is not MSI-X, do not use adaptive coalescing */
+ if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
+ netdev_info(enic->netdev, "INTR mode is not MSIX, not initializing adaptive coalescing");
+ return;
+ }
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (ENIC_LINK_SPEED_10G < speed)
+ index = ENIC_LINK_40G_INDEX;
+ else if (ENIC_LINK_SPEED_4G < speed)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -2231,6 +2334,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic->notify_timer.function = enic_notify_timer;
enic->notify_timer.data = (unsigned long)enic;
+ enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
@@ -2250,6 +2354,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ /* The rx coalesce timer has already been initialized; this
+ * value is used when adaptive coalescing is turned off.
+ */
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
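Tracing the adaptive path end to end: enic_intr_update_pkt_size() accumulates RX bytes per completion queue, enic_calc_int_moderation() converts them to an approximate bit rate and interpolates a timer value through mod_table[], and enic_set_int_moderation() writes the damped result to hardware when NAPI completes. A worked example with assumed numbers:

	/* Assume 75,000,000 bytes arrived in a 100,000 us window, small
	 * packets dominating, on a 10G link (mod_range[1] = {0, 3},
	 * range_end = ENIC_RX_COALESCE_RANGE_END = 125):
	 *
	 *   traffic = 75000000 << 3 = 600,000,000 bits
	 *   traffic /= 100000 (us)  = 6,000 Mbps
	 *
	 * 6000 first drops below mod_table[6].rx_rate (6120), giving
	 * range_percent = 60, so
	 *
	 *   timer = 0 + (125 - 0) * 60 / 100 = 75 us
	 *
	 * which is then damped against the previous target:
	 *
	 *   tobe_rx_coal_timeval = (75 + previous) >> 1
	 */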
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe803..4e6aa65857f7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
u32 pad10;
};
+struct vnic_rx_bytes_counter {
+ unsigned int small_pkt_bytes_cnt;
+ unsigned int large_pkt_bytes_cnt;
+};
+
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
unsigned int to_clean;
unsigned int last_color;
unsigned int interrupt_offset;
+ struct vnic_rx_bytes_counter pkt_size_counter;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
+ ktime_t prev_ts;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 69dd92598b7e..e86a45cb9e68 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -657,7 +657,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
return err;
}
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -674,7 +674,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
return err;
}
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index e670029862a1..1f3b301f8225 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -95,8 +95,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr);
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
int vnic_dev_notify_unset(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8c4b93be333b..13723c96d1a2 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -109,6 +109,7 @@ typedef struct board_info {
u8 imr_all;
unsigned int flags;
+ unsigned int in_timeout:1;
unsigned int in_suspend:1;
unsigned int wake_supported:1;
@@ -187,13 +188,13 @@ dm9000_reset(board_info_t *db)
* The essential point is that we have to do a double reset, and the
* instruction is to set LBK into MAC internal loopback mode.
*/
- iow(db, DM9000_NCR, 0x03);
+ iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
udelay(100); /* Application note says at least 20 us */
if (ior(db, DM9000_NCR) & 1)
dev_err(db->dev, "dm9000 did not respond to first reset\n");
iow(db, DM9000_NCR, 0);
- iow(db, DM9000_NCR, 0x03);
+ iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
udelay(100);
if (ior(db, DM9000_NCR) & 1)
dev_err(db->dev, "dm9000 did not respond to second reset\n");
@@ -273,7 +274,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
- if (db->in_suspend)
+ if (db->in_suspend || db->in_timeout)
mdelay(ms);
else
msleep(ms);
@@ -334,7 +335,8 @@ dm9000_phy_write(struct net_device *dev,
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
- mutex_lock(&db->addr_lock);
+ if (!db->in_timeout)
+ mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
@@ -365,7 +367,8 @@ dm9000_phy_write(struct net_device *dev,
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
- mutex_unlock(&db->addr_lock);
+ if (!db->in_timeout)
+ mutex_unlock(&db->addr_lock);
}
/* dm9000_set_io
@@ -882,6 +885,18 @@ dm9000_hash_table(struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
}
+static void
+dm9000_mask_interrupts(board_info_t *db)
+{
+ iow(db, DM9000_IMR, IMR_PAR);
+}
+
+static void
+dm9000_unmask_interrupts(board_info_t *db)
+{
+ iow(db, DM9000_IMR, db->imr_all);
+}
+
/*
* Initialize dm9000 board
*/
@@ -894,6 +909,9 @@ dm9000_init_dm9000(struct net_device *dev)
dm9000_dbg(db, 1, "entering %s\n", __func__);
+ dm9000_reset(db);
+ dm9000_mask_interrupts(db);
+
/* I/O mode */
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
@@ -941,9 +959,6 @@ dm9000_init_dm9000(struct net_device *dev)
db->imr_all = imr;
- /* Enable TX/RX interrupt mask */
- iow(db, DM9000_IMR, imr);
-
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
@@ -959,17 +974,19 @@ static void dm9000_timeout(struct net_device *dev)
/* Save previous register address */
spin_lock_irqsave(&db->lock, flags);
+ db->in_timeout = 1;
reg_save = readb(db->io_addr);
netif_stop_queue(dev);
- dm9000_reset(db);
dm9000_init_dm9000(dev);
+ dm9000_unmask_interrupts(db);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
+ db->in_timeout = 0;
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1093,7 +1110,6 @@ dm9000_rx(struct net_device *dev)
if (rxbyte & DM9000_PKT_ERR) {
dev_warn(db->dev, "status check fail: %d\n", rxbyte);
iow(db, DM9000_RCR, 0x00); /* Stop Device */
- iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
return;
}
@@ -1193,9 +1209,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
/* Save previous register address */
reg_save = readb(db->io_addr);
- /* Disable all interrupts */
- iow(db, DM9000_IMR, IMR_PAR);
-
+ dm9000_mask_interrupts(db);
/* Got DM9000 interrupt status */
int_status = ior(db, DM9000_ISR); /* Got ISR */
iow(db, DM9000_ISR, int_status); /* Clear ISR status */
@@ -1218,9 +1232,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
}
}
- /* Re-enable interrupt mask */
- iow(db, DM9000_IMR, db->imr_all);
-
+ dm9000_unmask_interrupts(db);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
@@ -1292,6 +1304,9 @@ dm9000_open(struct net_device *dev)
* may work, and tell the user that this is a problem */
if (irqflags == IRQF_TRIGGER_NONE)
+ irqflags = irq_get_trigger_type(dev->irq);
+
+ if (irqflags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irqflags |= IRQF_SHARED;
@@ -1301,11 +1316,14 @@ dm9000_open(struct net_device *dev)
mdelay(1); /* delay needs by DM9000B */
/* Initialize DM9000 board */
- dm9000_reset(db);
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* Now that we have an interrupt handler hooked up we can unmask
+ * our interrupts
+ */
+ dm9000_unmask_interrupts(db);
/* Init driver variable */
db->dbug_cnt = 0;
@@ -1313,7 +1331,8 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
- dm9000_schedule_poll(db);
+ /* Poll initial link status */
+ schedule_delayed_work(&db->phy_poll, 1);
return 0;
}
@@ -1326,7 +1345,7 @@ dm9000_shutdown(struct net_device *dev)
/* RESET device */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
- iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
+ dm9000_mask_interrupts(db);
iow(db, DM9000_RCR, 0x00); /* Disable RX */
}
@@ -1547,12 +1566,7 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
- /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
- * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
- * while probe stage.
- */
-
- iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+ dm9000_reset(db);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
@@ -1695,8 +1709,8 @@ dm9000_drv_resume(struct device *dev)
/* reset if we were not in wake mode to ensure if
* the device was powered off it is in a known state */
if (!db->wake_state) {
- dm9000_reset(db);
dm9000_init_dm9000(ndev);
+ dm9000_unmask_interrupts(db);
}
netif_device_attach(ndev);
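The net effect of the dm9000 rework is one consistent bring-up ordering: reset and initialize the chip with interrupts masked, and unmask only once a handler can service them. Condensed from the open path above (not a drop-in fragment):

	dm9000_init_dm9000(dev);	/* resets chip; IRQs stay masked */
	if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
		return -EAGAIN;
	dm9000_unmask_interrupts(db);	/* safe: handler is installed */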
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1642de78aac8..861660841ce2 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_TULIP_NAPI
netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
if (register_netdev(dev))
goto err_out_free_ring;
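The SET_ETHTOOL_OPS() conversions scattered through this patch (cxgb4, cxgb4vf, enic, tulip, dl2k, sundance) are part of a tree-wide removal of the macro, which was only a trivial wrapper, roughly:

	#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

so the direct assignment is equivalent.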
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa801a6af7b9..80afec335a11 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -962,8 +962,8 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
}
if(db->link_failed)
{
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
if (db->media_mode & ULI526X_AUTO)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 4fb756d219f7..1274b6fdac8a 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
}
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
#if 0
dev->features = NETIF_F_IP_CSUM;
#endif
@@ -1185,8 +1185,8 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, np->speed);
cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
if ( np->an_enable)
cmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d9e5ca0d48c1..433c1e185442 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 4884205e56ee..056b44b93477 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -134,17 +134,17 @@ struct ec_bhf_priv {
struct pci_dev *dev;
- void * __iomem io;
- void * __iomem dma_io;
+ void __iomem *io;
+ void __iomem *dma_io;
struct hrtimer hrtimer;
int tx_dma_chan;
int rx_dma_chan;
- void * __iomem ec_io;
- void * __iomem fifo_io;
- void * __iomem mii_io;
- void * __iomem mac_io;
+ void __iomem *ec_io;
+ void __iomem *fifo_io;
+ void __iomem *mii_io;
+ void __iomem *mac_io;
struct bhf_dma rx_buf;
struct rx_desc *rx_descs;
@@ -297,7 +297,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
struct device *dev = PRIV_TO_DEV(priv);
unsigned block_count, i;
- void * __iomem ec_info;
+ void __iomem *ec_info;
dev_dbg(dev, "Info block:\n");
dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
@@ -569,8 +569,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct net_device *net_dev;
struct ec_bhf_priv *priv;
- void * __iomem dma_io;
- void * __iomem io;
+ void __iomem *dma_io;
+ void __iomem *io;
int err = 0;
err = pci_enable_device(dev);
@@ -615,7 +615,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
- if (net_dev == 0) {
+ if (net_dev == NULL) {
err = -ENOMEM;
goto err_unmap_dma_io;
}
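The ec_bhf changes are sparse-annotation fixes: like const, __iomem qualifies the pointed-to type, so it belongs before the *:

	void * __iomem io;	/* wrong: the qualifier lands on the pointer */
	void __iomem *io;	/* right: the pointee is I/O memory */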
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7179df..2e7c5553955e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define RSS_INDIR_TABLE_LEN 128
+#define RSS_HASH_KEY_LEN 40
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -371,6 +374,7 @@ enum vf_state {
#define BE_FLAGS_LINK_STATUS_INIT 1
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_FLAGS_VLAN_PROMISC (1 << 4)
+#define BE_FLAGS_MCAST_PROMISC (1 << 5)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
@@ -409,6 +413,13 @@ struct be_resources {
u32 if_cap_flags;
};
+struct rss_info {
+ u64 rss_flags;
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+ u8 rss_queue[RSS_INDIR_TABLE_LEN];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -445,7 +456,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
- u8 vlan_tag[VLAN_N_VID];
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +518,7 @@ struct be_adapter {
u32 msg_enable;
int be_get_temp_freq;
u8 pf_number;
- u64 rss_flags;
+ struct rss_info rss_info;
};
#define be_physfn(adapter) (!adapter->virtfn)
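Replacing vlan_tag[VLAN_N_VID] (4096 bytes of u8 flags) with a vids bitmap shrinks the adapter structure by roughly 3.5 KB and lets the driver use the standard bitmap helpers. An illustrative usage sketch (not taken from the patch):

	set_bit(vid, adapter->vids);		/* VLAN vid added */
	clear_bit(vid, adapter->vids);		/* VLAN vid removed */
	if (test_bit(vid, adapter->vids))
		pr_debug("vid %u is configured\n", vid);
	int num = bitmap_weight(adapter->vids, VLAN_N_VID);	/* VIDs set */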
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15af0d24..f4ea3490f446 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
}
};
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
- u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
return (void *)addr;
}
-static int be_mcc_compl_process(struct be_adapter *adapter,
- struct be_mcc_compl *compl)
+static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
- u16 compl_status, extd_status;
- struct be_cmd_resp_hdr *resp_hdr;
- u8 opcode = 0, subsystem = 0;
-
- /* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
- be_dws_le_to_cpu(compl, 4);
-
- compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
+ if (base_status == MCC_STATUS_NOT_SUPPORTED ||
+ base_status == MCC_STATUS_ILLEGAL_REQUEST ||
+ addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
+ (base_status == MCC_STATUS_ILLEGAL_FIELD ||
+ addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
+ return true;
+ else
+ return false;
+}
- resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+/* Placeholder for all the async MCC cmds for which the caller is not in a
+ * busy loop (has not issued be_mcc_notify_wait())
+ */
+static void be_async_cmd_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl,
+ struct be_cmd_resp_hdr *resp_hdr)
+{
+ enum mcc_base_status base_status = base_status(compl->status);
+ u8 opcode = 0, subsystem = 0;
if (resp_hdr) {
opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
complete(&adapter->et_cmd_compl);
- return 0;
+ return;
}
- if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
- (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
- (subsystem == CMD_SUBSYSTEM_COMMON)) {
- adapter->flash_status = compl_status;
+ if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
+ opcode == OPCODE_COMMON_WRITE_OBJECT) &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ adapter->flash_status = compl->status;
complete(&adapter->et_cmd_compl);
+ return;
}
- if (compl_status == MCC_STATUS_SUCCESS) {
- if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
- (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
- (subsystem == CMD_SUBSYSTEM_ETH)) {
- be_parse_stats(adapter);
- adapter->stats_cmd_sent = false;
- }
- if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
- subsystem == CMD_SUBSYSTEM_COMMON) {
+ if ((opcode == OPCODE_ETH_GET_STATISTICS ||
+ opcode == OPCODE_ETH_GET_PPORT_STATS) &&
+ subsystem == CMD_SUBSYSTEM_ETH &&
+ base_status == MCC_STATUS_SUCCESS) {
+ be_parse_stats(adapter);
+ adapter->stats_cmd_sent = false;
+ return;
+ }
+
+ if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ if (base_status == MCC_STATUS_SUCCESS) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
- (void *)resp_hdr;
+ (void *)resp_hdr;
adapter->drv_stats.be_on_die_temperature =
- resp->on_die_temperature;
- }
- } else {
- if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+ resp->on_die_temperature;
+ } else {
adapter->be_get_temp_freq = 0;
+ }
+ return;
+ }
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ enum mcc_base_status base_status;
+ enum mcc_addl_status addl_status;
+ struct be_cmd_resp_hdr *resp_hdr;
+ u8 opcode = 0, subsystem = 0;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ base_status = base_status(compl->status);
+ addl_status = addl_status(compl->status);
+
+ resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+ if (resp_hdr) {
+ opcode = resp_hdr->opcode;
+ subsystem = resp_hdr->subsystem;
+ }
+
+ be_async_cmd_process(adapter, compl, resp_hdr);
- if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
- compl_status == MCC_STATUS_ILLEGAL_REQUEST)
- goto done;
+ if (base_status != MCC_STATUS_SUCCESS &&
+ !be_skip_err_log(opcode, base_status, addl_status)) {
- if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+ if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
dev_warn(&adapter->pdev->dev,
"VF is not privileged to issue opcode %d-%d\n",
opcode, subsystem);
} else {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
dev_err(&adapter->pdev->dev,
"opcode %d-%d failed:status %d-%d\n",
- opcode, subsystem, compl_status, extd_status);
-
- if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
- return extd_status;
+ opcode, subsystem, base_status, addl_status);
}
}
-done:
- return compl_status;
+ return compl->status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
- struct be_async_event_link_state *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_link_state *evt =
+ (struct be_async_event_link_state *)compl;
+
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
- struct be_async_event_grp5_cos_priority *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_cos_priority *evt =
+ (struct be_async_event_grp5_cos_priority *)compl;
+
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
- struct be_async_event_grp5_qos_link_speed *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_qos_link_speed *evt =
+ (struct be_async_event_grp5_qos_link_speed *)compl;
+
if (adapter->phy.link_speed >= 0 &&
evt->physical_port == adapter->port_num)
adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
- struct be_async_event_grp5_pvid_state *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_pvid_state *evt =
+ (struct be_async_event_grp5_pvid_state *)compl;
+
if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *evt)
+ struct be_mcc_compl *compl)
{
- u8 event_type = 0;
-
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
+ u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_EVENT_COS_PRIORITY:
- be_async_grp5_cos_priority_process(adapter,
- (struct be_async_event_grp5_cos_priority *)evt);
- break;
+ be_async_grp5_cos_priority_process(adapter, compl);
+ break;
case ASYNC_EVENT_QOS_SPEED:
- be_async_grp5_qos_speed_process(adapter,
- (struct be_async_event_grp5_qos_link_speed *)evt);
- break;
+ be_async_grp5_qos_speed_process(adapter, compl);
+ break;
case ASYNC_EVENT_PVID_STATE:
- be_async_grp5_pvid_state_process(adapter,
- (struct be_async_event_grp5_pvid_state *)evt);
- break;
+ be_async_grp5_pvid_state_process(adapter, compl);
+ break;
default:
dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
static void be_async_dbg_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *cmp)
+ struct be_mcc_compl *cmp)
{
u8 event_type = 0;
struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
+ event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
}
}
-static inline bool is_link_state_evt(u32 trailer)
+static inline bool is_link_state_evt(u32 flags)
{
- return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE;
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE;
}
-static inline bool is_grp5_evt(u32 trailer)
+static inline bool is_grp5_evt(u32 flags)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_GRP_5);
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_GRP_5;
}
-static inline bool is_dbg_evt(u32 trailer)
+static inline bool is_dbg_evt(u32 flags)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_QNQ);
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_QNQ;
+}
+
+static void be_mcc_event_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter, compl);
+ else if (is_grp5_evt(compl->flags))
+ be_async_grp5_evt_process(adapter, compl);
+ else if (is_dbg_evt(compl->flags))
+ be_async_dbg_evt_process(adapter, compl);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock(&adapter->mcc_cq_lock);
+
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(compl->flags))
- be_async_link_state_process(adapter,
- (struct be_async_event_link_state *) compl);
- else if (is_grp5_evt(compl->flags))
- be_async_grp5_evt_process(adapter,
- compl->flags, compl);
- else if (is_dbg_evt(compl->flags))
- be_async_dbg_evt_process(adapter,
- compl->flags, compl);
+ be_mcc_event_process(adapter, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(adapter, compl);
- atomic_dec(&mcc_obj->q.used);
+ status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
}
be_mcc_compl_use(compl);
num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
if (status == -EIO)
goto out;
- status = resp->status;
+ status = (resp->base_status |
+ ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
+ CQE_ADDL_STATUS_SHIFT));
out:
return status;
}
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
- sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
- dev_info(dev, "Waiting for POST, %ds elapsed\n",
- timeout);
+ dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
- unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
wrb->tag0 = addr & 0xFFFFFFFF;
wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len,
- struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+ u8 subsystem, u8 opcode, int cmd_len,
+ struct be_mcc_wrb *wrb,
+ struct be_dma_mem *mem)
{
struct be_sge *sge;
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
- struct be_dma_mem *mem)
+ struct be_dma_mem *mem)
{
int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
u64 dma = (u64)mem->dma;
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+ NULL);
/* Support for EQ_CREATEv2 available only SH-R onwards */
if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+ NULL);
req->type = MAC_ADDRESS_TYPE_NETWORK;
if (permanent) {
req->permanent = 1;
@@ -940,7 +976,7 @@ err:
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
- struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
- coalesce_wm);
+ coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
- ctxt, no_delay);
+ ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
- no_delay);
+ no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
- ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
}
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
} else {
req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
}
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
}
int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq, struct be_queue_info *cq)
{
int status;
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u32 if_id, u32 rss, u8 *rss_id)
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
req->cq_id = cpu_to_le16(cq_id);
req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
* Uses Mbox
*/
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int queue_type)
+ int queue_type)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
}
be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
- NULL);
+ NULL);
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+ sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+ sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
hdr = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+ nonemb_cmd);
/* version 1 of the cmd is supported on all chips except BE2 */
if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
- nonemb_cmd);
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+ wrb, nonemb_cmd);
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req), wrb, NULL);
/* version 1 of the cmd is supported on all chips except BE2 */
if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
- wrb, NULL);
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+ sizeof(*req), wrb, NULL);
be_mcc_notify(adapter);
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+ NULL);
req->fat_operation = cpu_to_le32(QUERY_FAT);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
if (!get_fat_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
- &get_fat_cmd);
+ OPCODE_COMMON_MANAGE_FAT, payload_len,
+ wrb, &get_fat_cmd);
req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!status) {
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
memcpy(buf + offset,
- resp->data_buffer,
- le32_to_cpu(resp->read_log_length));
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
} else {
dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
}
err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va,
- get_fat_cmd.dma);
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash)
+ char *fw_on_flash)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+ NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+ NULL);
req->num_eq = cpu_to_le32(num);
for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous)
+ u32 num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+ wrb, NULL);
req->interface_id = if_id;
- req->promiscuous = promiscuous;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req->num_vlan = num;
- if (!promiscuous) {
- memcpy(req->normal_vlan, vtag_array,
- req->num_vlan * sizeof(vtag_array[0]));
- }
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
memset(req, 0, sizeof(*req));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
- wrb, mem);
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+ wrb, mem);
req->if_id = cpu_to_le32(adapter->if_handle);
if (flags & IFF_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
if (value == ON)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
- req->if_flags_mask) {
+ req->if_flags_mask) {
dev_warn(&adapter->pdev->dev,
"Cannot set rx filter flags 0x%x\n",
req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ sizeof(*req), wrb, NULL);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+ NULL);
status = be_mbox_notify_wait(adapter);
@@ -2020,47 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size)
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
- 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
- 0x3ea83c02, 0x4a110304};
int status;
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(rss_hash_opts);
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- if (lancer_chip(adapter) || skyhawk_chip(adapter))
+ if (!BEx_chip(adapter))
req->hdr.version = 1;
memcpy(req->cpu_table, rsstable, table_size);
- memcpy(req->hash, myhash, sizeof(myhash));
+ memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
be_dws_cpu_to_le(req->hash, sizeof(req->hash));
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
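/* Editor's note on the hunk above: be_cmd_rss_config() moves off the
 * bootstrap mailbox (mutex_lock_interruptible + wrb_from_mbox +
 * be_mbox_notify_wait) and onto the MCC queue, the pattern used by
 * most commands in this file. Roughly:
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);
 *	if (!wrb)
 *		goto err;		// -EBUSY, queue exhausted
 *	// fill the embedded payload
 *	status = be_mcc_notify_wait(adapter);
 * err:
 *	spin_unlock_bh(&adapter->mcc_lock);
 *
 * The rework also replaces the hard-coded hash table with the
 * caller-supplied rss_hkey.
 */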
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
- u8 bcn, u8 sts, u8 state)
+ u8 bcn, u8 sts, u8 state)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+ sizeof(*req), wrb, NULL);
req->port_num = port_num;
req->beacon_state = state;
@@ -2107,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+ wrb, NULL);
req->port_num = port_num;
@@ -2146,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_OBJECT,
- sizeof(struct lancer_cmd_req_write_object), wrb,
- NULL);
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object), wrb,
+ NULL);
ctxt = &req->context;
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- write_length, ctxt, data_size);
+ write_length, ctxt, data_size);
if (data_size == 0)
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 1);
+ eof, ctxt, 1);
else
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 0);
+ eof, ctxt, 0);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
- sizeof(struct lancer_cmd_req_write_object))
- & 0xFFFFFFFF);
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
@@ -2197,8 +2244,8 @@ err_unlock:
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status)
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_OBJECT,
- sizeof(struct lancer_cmd_req_read_object), wrb,
- NULL);
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
req->desired_read_len = cpu_to_le32(data_size);
req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2291,7 @@ err_unlock:
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2261,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+ cmd);
req->params.op_type = cpu_to_le32(flash_type);
req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2284,7 +2332,7 @@ err_unlock:
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset)
+ u16 optype, int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_read_flash_crc *req;
@@ -2303,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
wrb, NULL);
- req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(optype);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
req->params.offset = cpu_to_le32(offset);
req->params.data_buf_size = cpu_to_le32(0x4);
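/* Editor's sketch: with the optype parameter added above, callers can
 * request the 4-byte CRC report for any flash section instead of only
 * the RedBoot region, e.g. (illustrative call, offset assumed to point
 * at the section's trailing CRC):
 *
 *	status = be_cmd_get_flash_crc(adapter, crc, OPTYPE_REDBOOT,
 *				      crc_offset);
 */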
@@ -2318,7 +2366,7 @@ err:
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+ wrb, nonemb_cmd);
memcpy(req->magic_mac, mac, ETH_ALEN);
status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
- NULL);
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+ wrb, NULL);
req->src_port = port_num;
req->dest_port = port_num;
@@ -2378,7 +2426,8 @@ err:
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+ NULL);
req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2471,7 @@ err:
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd)
+ u32 byte_cnt, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+ cmd);
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2516,7 @@ err:
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+ nonemb_cmd);
status = be_mcc_notify_wait(adapter);
@@ -2510,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2521,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+ wrb, &cmd);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -2544,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2568,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ &attribs_cmd.dma);
if (!attribs_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -2613,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
req = attribs_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
- &attribs_cmd);
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+ wrb, &attribs_cmd);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2649,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+ sizeof(*req), wrb, NULL);
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure during GET_MAC_LIST\n");
+ "Memory allocation failure during GET_MAC_LIST\n");
return -ENOMEM;
}
@@ -2831,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
/* If no active mac_id found, return first mac addr */
*pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
- ETH_ALEN);
+ ETH_ALEN);
}
out:
spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
- u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+ u8 *mac, u32 if_handle, bool active, u32 domain)
{
if (!active)
@@ -2892,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -2906,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
req->hdr.domain = domain;
req->mac_count = mac_count;
@@ -2917,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size,
- cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2963,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (!status) {
struct be_cmd_resp_get_hsw_config *resp =
embedded_payload(wrb);
- be_dws_le_to_cpu(&resp->context,
- sizeof(resp->context));
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
- pvid, &resp->context);
+ pvid, &resp->context);
if (pvid)
*pvid = le16_to_cpu(vid);
if (mode)
@@ -3062,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -3349,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3396,7 +3442,7 @@ err:
/* Uses mbox */
static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
/* Uses sync mcc */
static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le32_to_cpu(resp->desc_count);
- pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
- desc_count);
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
@@ -3548,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
nic->cq_count = 0xFFFF;
nic->toe_conn_count = 0xFFFF;
nic->eq_count = 0xFFFF;
+ nic->iface_count = 0xFFFF;
nic->link_param = 0xFF;
+ nic->channel_id_param = cpu_to_le16(0xF000);
nic->acpi_params = 0xFF;
nic->wol_param = 0x0F;
- nic->bw_min = 0xFFFFFFFF;
+ nic->tunnel_iface_count = 0xFFFF;
+ nic->direct_tenant_iface_count = 0xFFFF;
nic->bw_max = 0xFFFFFFFF;
}
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+ u8 domain)
{
- if (lancer_chip(adapter)) {
- struct be_nic_res_desc nic_desc;
+ struct be_nic_res_desc nic_desc;
+ u32 bw_percent;
+ u16 version = 0;
+
+ if (BE3_chip(adapter))
+ return be_cmd_set_qos(adapter, max_rate / 10, domain);
- be_reset_nic_desc(&nic_desc);
+ be_reset_nic_desc(&nic_desc);
+ nic_desc.pf_num = adapter->pf_number;
+ nic_desc.vf_num = domain;
+ if (lancer_chip(adapter)) {
nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
(1 << NOSV_SHIFT);
- nic_desc.pf_num = adapter->pf_number;
- nic_desc.vf_num = domain;
- nic_desc.bw_max = cpu_to_le32(bps);
-
- return be_cmd_set_profile_config(adapter, &nic_desc,
- RESOURCE_DESC_SIZE_V0,
- 0, domain);
+ nic_desc.bw_max = cpu_to_le32(max_rate / 10);
} else {
- return be_cmd_set_qos(adapter, bps, domain);
+ version = 1;
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+ nic_desc.bw_max = cpu_to_le32(bw_percent);
}
+
+ return be_cmd_set_profile_config(adapter, &nic_desc,
+ nic_desc.hdr.desc_len,
+ version, domain);
}
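/* Editor's sketch of the conversion above (rates assumed to be in
 * Mbps; helper name hypothetical): BE3 and Lancer program an absolute
 * rate in 10-Mbps units, while newer chips program a percentage of the
 * negotiated link speed.
 */
static u32 be_max_rate_to_bw_percent(u32 max_rate, u16 link_speed)
{
	/* max_rate of 0 means "no limit", i.e. 100% of link speed */
	return max_rate ? (max_rate * 100) / link_speed : 100;
}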
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3859,7 +3919,7 @@ err:
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
- int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
struct be_adapter *adapter = netdev_priv(netdev_handle);
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d53c1c9..3e0a6b243806 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
/* Completion Status */
-enum {
+enum mcc_base_status {
MCC_STATUS_SUCCESS = 0,
MCC_STATUS_FAILED = 1,
MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@ enum {
MCC_STATUS_NOT_SUPPORTED = 66
};
-#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
+/* Additional status */
+enum mcc_addl_status {
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+ MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+};
+
+#define CQE_BASE_STATUS_MASK 0xFFFF
+#define CQE_BASE_STATUS_SHIFT 0 /* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK 0xFF
+#define CQE_ADDL_STATUS_SHIFT 16 /* bits 16 - 31 */
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
+#define base_status(status) \
+ ((enum mcc_base_status) \
+ (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status) \
+ ((enum mcc_addl_status) \
+ (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
+ CQE_ADDL_STATUS_MASK : 0))
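/* Editor's sketch: decoding a raw completion status word with the
 * macros above. A status of 0x004d0001, for instance, decodes to a
 * base status of MCC_STATUS_FAILED and an additional status of
 * MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH:
 *
 *	enum mcc_base_status base = base_status(compl->status);
 *	enum mcc_addl_status addl = addl_status(compl->status);
 */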
struct be_mcc_compl {
u32 status; /* dword 0 */
@@ -74,13 +87,13 @@ struct be_mcc_compl {
u32 flags; /* dword 3 */
};
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl is interpreted as follows:
+/* When the async bit of mcc_compl->flags is set, the flags dword
+ * is interpreted as follows:
*/
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+#define ASYNC_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_TYPE_SHIFT 16
+#define ASYNC_EVENT_TYPE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
#define ASYNC_EVENT_CODE_GRP_5 0x5
#define ASYNC_EVENT_QOS_SPEED 0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
#define ASYNC_EVENT_CODE_QNQ 0x6
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
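/* Editor's sketch: with the trailer struct removed below, the event
 * code and type are pulled straight out of the completion's flags
 * dword using the renamed masks:
 *
 *	u8 event_code = (flags >> ASYNC_EVENT_CODE_SHIFT) &
 *			ASYNC_EVENT_CODE_MASK;
 *	u8 event_type = (flags >> ASYNC_EVENT_TYPE_SHIFT) &
 *			ASYNC_EVENT_TYPE_MASK;
 */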
-struct be_async_event_trailer {
- u32 code;
-};
-
enum {
LINK_DOWN = 0x0,
LINK_UP = 0x1
@@ -100,7 +109,7 @@ enum {
#define LINK_STATUS_MASK 0x1
#define LOGICAL_LINK_STATUS_MASK 0x2
-/* When the event code of an async trailer is link-state, the mcc_compl
+/* When the event code of compl->flags is link-state, the mcc_compl
* must be interpreted as follows
*/
struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
u8 port_speed;
u8 port_fault;
u8 rsvd0[7];
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
* the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
u8 rsvd[5];
u16 qos_link_speed;
u32 event_tag;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
* CoS-Priority, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
u8 valid;
u8 rsvd0;
u8 event_tag;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
* PVID state, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
u16 tag;
u32 event_tag;
u32 rsvd1;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
/* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
u16 vlan_tag;
u32 event_tag;
u8 rsvd1[4];
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
u8 opcode; /* dword 0 */
u8 subsystem; /* dword 0 */
u8 rsvd[2]; /* dword 0 */
- u8 status; /* dword 1 */
- u8 add_status; /* dword 1 */
+ u8 base_status; /* dword 1 */
+ u8 addl_status; /* dword 1 */
u8 rsvd1[2]; /* dword 1 */
u32 response_length; /* dword 2 */
u32 actual_resp_len; /* dword 3 */
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
struct flashrom_params params;
u8 crc[4];
u8 rsvd[4];
-};
+} __packed;
+
/**************** Lancer Firmware Flash ************/
struct amap_lancer_write_obj_context {
u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
u16 cq_count;
u16 toe_conn_count;
u16 eq_count;
- u32 rsvd5;
+ u16 vlan_id;
+ u16 iface_count;
u32 cap_flags;
u8 link_param;
- u8 rsvd6[3];
+ u8 rsvd6;
+ u16 channel_id_param;
u32 bw_min;
u32 bw_max;
u8 acpi_params;
u8 wol_param;
u16 rsvd7;
- u32 rsvd8[7];
+ u16 tunnel_iface_count;
+ u16 direct_tenant_iface_count;
+ u32 rsvd8[6];
} __packed;
/************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
char *fw_on_flash);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous);
+ u32 num);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2082,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
int be_cmd_reset_function(struct be_adapter *adapter);
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
int be_process_mcc(struct be_adapter *adapter);
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
u8 status, u8 state);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
+ u16 optype, int offset);
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+ u16 link_speed, u8 domain);
void be_detect_error(struct be_adapter *adapter);
int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96cba65d..e2da4d20dd3d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers
* to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_NO_LOOPBACK 0xff
static void be_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
drvinfo->eedump_len = 0;
}
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
u32 data_read = 0, eof;
u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
memset(&data_len_cmd, 0, sizeof(data_len_cmd));
/* data_offset and data_size should be 0 to get reg len */
status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
- file_name, &data_read, &eof, &addn_status);
+ file_name, &data_read, &eof,
+ &addn_status);
return data_read;
}
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
- u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
{
struct be_dma_mem read_cmd;
u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ &read_cmd.dma);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure while reading dump\n");
+ "Memory allocation failure while reading dump\n");
return -ENOMEM;
}
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
- total_read_len, file_name, &read_len,
- &eof, &addn_status);
+ total_read_len, file_name,
+ &read_len, &eof, &addn_status);
if (!status) {
memcpy(buf + total_read_len, read_cmd.va, read_len);
total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
}
}
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ read_cmd.dma);
return status;
}
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
- LANCER_FW_DUMP_FILE);
+ LANCER_FW_DUMP_FILE);
else
be_cmd_get_reg_len(adapter, &log_size);
}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
memset(buf, 0, regs->len);
if (lancer_chip(adapter))
lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
- regs->len, buf);
+ regs->len, buf);
else
be_cmd_get_regs(adapter, regs->len, buf);
}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
return 0;
}
-static void
-be_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
}
}
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
adapter->rx_fc = ecmd->rx_pause;
status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
+ adapter->tx_fc, adapter->rx_fc);
if (status)
dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
return status;
}
-static int
-be_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
return status;
}
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
- 4096, &ddrdma_cmd);
+ 4096, &ddrdma_cmd);
if (ret != 0)
goto err;
}
@@ -773,20 +766,17 @@ err:
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
- u64 *status)
+ u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- loopback_type, 1);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
- loopback_type, 1500,
- 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- BE_NO_LOOPBACK, 1);
+ loopback_type, 1500, 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
return *status;
}
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0)
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
}
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
return be_load_fw(adapter, efl->data);
}
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_PF_FILE);
+ LANCER_VPD_PF_FILE);
else
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_VF_FILE);
+ LANCER_VPD_VF_FILE);
} else {
return BE_READ_SEEPROM_LEN;
}
}
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
- uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
else
return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
}
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device << 16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
switch (flow_type) {
case TCP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case TCP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct be_rx_obj *rxo;
int status = 0, i, j;
u8 rsstable[128];
- u32 rss_flags = adapter->rss_flags;
+ u32 rss_flags = adapter->rss_info.rss_flags;
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return -EINVAL;
}
- if (rss_flags == adapter->rss_flags)
+ if (rss_flags == adapter->rss_info.rss_flags)
return status;
if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}
}
}
- status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+ status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+ rss_flags, 128, adapter->rss_info.rss_hkey);
if (!status)
- adapter->rss_flags = rss_flags;
+ adapter->rss_info.rss_flags = rss_flags;
return status;
}
@@ -1103,6 +1090,69 @@ static int be_set_channels(struct net_device *netdev,
return be_update_queues(adapter);
}
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct rss_info *rss = &adapter->rss_info;
+
+ if (indir) {
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+ indir[i] = rss->rss_queue[i];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+ return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *hkey)
+{
+ int rc = 0, i, j;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+ if (indir) {
+ struct be_rx_obj *rxo;
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+ j = indir[i];
+ rxo = &adapter->rx_obj[j];
+ rsstable[i] = rxo->rss_id;
+ adapter->rss_info.rss_queue[i] = j;
+ }
+ } else {
+ memcpy(rsstable, adapter->rss_info.rsstable,
+ RSS_INDIR_TABLE_LEN);
+ }
+
+ if (!hkey)
+ hkey = adapter->rss_info.rss_hkey;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
+ if (rc) {
+ adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+ return -EIO;
+ }
+ memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+ memcpy(adapter->rss_info.rsstable, rsstable,
+ RSS_INDIR_TABLE_LEN);
+ return 0;
+}
+
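/* Editor's sketch: in be_set_rxfh() above, indir[i] is an RX queue
 * index that is translated to that queue's HW rss_id before being
 * programmed. A caller-built table that spreads flows evenly across
 * n_rxq queues would look like (loop is illustrative):
 *
 *	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
 *		indir[i] = i % n_rxq;
 */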
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_rxfh_indir_size = be_get_rxfh_indir_size,
+ .get_rxfh_key_size = be_get_rxfh_key_size,
+ .get_rxfh = be_get_rxfh,
+ .set_rxfh = be_set_rxfh,
.get_channels = be_get_channels,
.set_channels = be_set_channels
};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3bd198550edb..8840c64aaeca 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -188,10 +188,14 @@
#define OPTYPE_FCOE_FW_ACTIVE 10
#define OPTYPE_FCOE_FW_BACKUP 11
#define OPTYPE_NCSI_FW 13
+#define OPTYPE_REDBOOT_DIR 18
+#define OPTYPE_REDBOOT_CONFIG 19
+#define OPTYPE_SH_PHY_FW 21
+#define OPTYPE_FLASHISM_JUMPVECTOR 22
+#define OPTYPE_UFI_DIR 23
#define OPTYPE_PHY_FW 99
#define TN_8022 13
-#define ILLEGAL_IOCTL_REQ 2
#define FLASHROM_OPER_PHY_FLASH 9
#define FLASHROM_OPER_PHY_SAVE 10
#define FLASHROM_OPER_FLASH 1
@@ -250,6 +254,9 @@
#define IMAGE_FIRMWARE_BACKUP_FCoE 178
#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
#define IMAGE_FIRMWARE_PHY 192
+#define IMAGE_REDBOOT_DIR 208
+#define IMAGE_REDBOOT_CONFIG 209
+#define IMAGE_UFI_DIR 210
#define IMAGE_BOOT_CODE 224
/************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
u32 image_size;
u32 cksum;
u32 entry_point;
- u32 rsvd0;
+ u16 optype;
+ u16 rsvd0;
u32 rsvd1;
u8 ver_data[32];
} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5dec77..6822b3d76d85 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- u16 len, u16 entry_size)
+ u16 len, u16 entry_size)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
u32 reg, enabled;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
- &reg);
+ &reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
return;
pci_write_config_dword(adapter->pdev,
- PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
- val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- DB_EQ_RING_ID_EXT_MASK_SHIFT);
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_error)
return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
- if (be_roce_supported(adapter)) {
+ if (be_roce_supported(adapter)) {
drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
{
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_pport_stats *pport_stats =
- pport_stats_from_cmd(adapter);
+ struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
}
static void populate_erx_stats(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u32 erx_stat)
+ struct be_rx_obj *rxo, u32 erx_stat)
{
if (!BEx_chip(adapter))
rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
}
static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs,
+ bool stopped)
{
struct be_tx_stats *stats = tx_stats(txo);
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
u8 vlan_prio;
u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len,
+ bool skip_hw_vlan)
{
u16 vlan_tag, proto;
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- bool unmap_single)
+ bool unmap_single)
{
dma_addr_t dma;
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
- struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
- *skip_hw_vlan = true;
+ *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN))) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
static int be_vid_config(struct be_adapter *adapter)
{
u16 vids[BE_NUM_VLANS_SUPPORTED];
- u16 num = 0, i;
+ u16 num = 0, i = 0;
int status = 0;
/* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
- for (i = 0; i < VLAN_N_VID; i++)
- if (adapter->vlan_tag[i])
- vids[num++] = cpu_to_le16(i);
-
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vids, num, 0);
+ for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ vids[num++] = cpu_to_le16(i);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
if (status) {
/* Set to VLAN promisc mode as setting VLAN filter failed */
- if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ if (addl_status(status) ==
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
goto set_vlan_promisc;
dev_err(&adapter->pdev->dev,
"Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
return status;
- if (adapter->vlan_tag[vid])
+ if (test_bit(vid, adapter->vids))
return status;
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
if (status) {
adapter->vlans_added--;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
}
return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
goto ret;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
ret:
return status;
}
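The hunks above convert per-VID tracking from a 4096-entry u8 array (adapter->vlan_tag[]) to a kernel bitmap (adapter->vids) driven by set_bit()/clear_bit()/test_bit() and walked with for_each_set_bit(). A minimal standalone C sketch of the same bitmap technique, with illustrative names only (the kernel helpers are additionally atomic, which this sketch is not):

	/* Sketch of bitmap-based VID tracking. VLAN_N_VID is 4096 in the kernel. */
	#include <limits.h>
	#include <stdio.h>

	#define VLAN_N_VID 4096
	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
	#define BITMAP_LONGS ((VLAN_N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static unsigned long vids[BITMAP_LONGS];

	static void vid_set(unsigned int vid)
	{
		vids[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
	}

	static void vid_clear(unsigned int vid)
	{
		vids[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
	}

	static int vid_test(unsigned int vid)
	{
		return (vids[vid / BITS_PER_LONG] >> (vid % BITS_PER_LONG)) & 1;
	}

	int main(void)
	{
		unsigned int i, num = 0;

		vid_set(10);
		vid_set(100);
		vid_clear(10);

		/* equivalent of the for_each_set_bit() walk in be_vid_config() */
		for (i = 0; i < VLAN_N_VID; i++)
			if (vid_test(i))
				num++;

		printf("%u VIDs configured\n", num);	/* prints 1 */
		return 0;
	}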
@@ -1197,7 +1191,7 @@ ret:
static void be_clear_promisc(struct be_adapter *adapter)
{
adapter->promiscuous = false;
- adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+ adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > be_max_mc(adapter)) {
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
- goto done;
- }
+ netdev_mc_count(netdev) > be_max_mc(adapter))
+ goto set_mcast_promisc;
if (netdev_uc_count(netdev) != adapter->uc_macs) {
struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
}
status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-
- /* Set to MCAST promisc mode if setting MULTICAST address fails */
- if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ if (!status) {
+ if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
+ goto done;
}
+
+set_mcast_promisc:
+ if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ return;
+
+ /* Set to MCAST promisc mode if setting MULTICAST address fails
+ * or if num configured exceeds what we support
+ */
+ status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ if (!status)
+ adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
return;
}
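The rework above makes multicast promiscuity sticky: BE_FLAGS_MCAST_PROMISC records that ALLMULTI is already programmed, so be_set_rx_mode() only issues a filter command on an actual state change. A standalone sketch of that transition-only pattern, using stand-in names rather than the driver's API:

	#include <stdbool.h>
	#include <stdio.h>

	static bool hw_mcast_promisc;		/* mirrors BE_FLAGS_MCAST_PROMISC */

	static int hw_set_allmulti(bool on)	/* stand-in for be_cmd_rx_filter() */
	{
		printf("filter cmd: ALLMULTI %s\n", on ? "ON" : "OFF");
		return 0;
	}

	static void set_mcast_promisc(bool want)
	{
		if (want == hw_mcast_promisc)	/* already in the right state */
			return;
		if (!hw_set_allmulti(want))
			hw_mcast_promisc = want;
	}

	int main(void)
	{
		set_mcast_promisc(true);	/* issues the command */
		set_mcast_promisc(true);	/* no-op, state already recorded */
		set_mcast_promisc(false);	/* issues the command */
		return 0;
	}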
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
- mac, vf);
+ mac, vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
}
static int be_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *vi)
+ struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = vf_cfg->tx_rate;
+ vi->max_tx_rate = vf_cfg->tx_rate;
+ vi->min_tx_rate = 0;
vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
return status;
}
-static int be_set_vf_tx_rate(struct net_device *netdev,
- int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
+ struct device *dev = &adapter->pdev->dev;
+ int percent_rate, status = 0;
+ u16 link_speed = 0;
+ u8 link_status;
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate < 100 || rate > 10000) {
- dev_err(&adapter->pdev->dev,
- "tx rate must be between 100 and 10000 Mbps\n");
+ if (min_tx_rate)
return -EINVAL;
+
+ if (!max_tx_rate)
+ goto config_qos;
+
+ status = be_cmd_link_status_query(adapter, &link_speed,
+ &link_status, 0);
+ if (status)
+ goto err;
+
+ if (!link_status) {
+ dev_err(dev, "TX-rate setting not allowed when link is down\n");
+ status = -EPERM;
+ goto err;
+ }
+
+ if (max_tx_rate < 100 || max_tx_rate > link_speed) {
+ dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
+ link_speed);
+ status = -EINVAL;
+ goto err;
+ }
+
+ /* On Skyhawk the QOS setting must be done only as a % value */
+ percent_rate = link_speed / 100;
+ if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
+ dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
+ percent_rate);
+ status = -EINVAL;
+ goto err;
}
- status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
+config_qos:
+ status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
if (status)
- dev_err(&adapter->pdev->dev,
- "tx rate %d on VF %d failed\n", rate, vf);
- else
- adapter->vf_cfg[vf].tx_rate = rate;
+ goto err;
+
+ adapter->vf_cfg[vf].tx_rate = max_tx_rate;
+ return 0;
+
+err:
+ dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
+ max_tx_rate, vf);
return status;
}
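be_set_vf_tx_rate() above is reshaped for the new .ndo_set_vf_rate signature: it rejects a minimum rate (unsupported here), treats 0 as "no limit", bounds the maximum by the queried link speed, and on Skyhawk requires a whole percent of link speed. A compilable sketch of just that validation, under those assumptions:

	#include <errno.h>
	#include <stdbool.h>

	static int validate_vf_rate(int min_tx_rate, int max_tx_rate,
				    int link_speed_mbps, bool percent_granularity)
	{
		if (min_tx_rate)	/* min rate not supported by this HW */
			return -EINVAL;
		if (!max_tx_rate)	/* 0 means "no limit", always OK */
			return 0;
		if (!link_speed_mbps)	/* link down: cannot program a rate */
			return -EPERM;
		if (max_tx_rate < 100 || max_tx_rate > link_speed_mbps)
			return -EINVAL;
		if (percent_granularity && (max_tx_rate % (link_speed_mbps / 100)))
			return -EINVAL;	/* must be a whole % of link speed */
		return 0;
	}

	int main(void)
	{
		/* 10G link, Skyhawk-style % granularity: 5000 Mbps is 50% -> OK */
		return validate_vf_rate(0, 5000, 10000, true) ? 1 : 0;
	}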
static int be_set_vf_link_state(struct net_device *netdev, int vf,
@@ -1469,7 +1505,7 @@ modify_eqd:
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_stats *stats = rx_stats(rxo);
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+ curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlan_tag = swab16(rxcp->vlan_tag);
if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
- !adapter->vlan_tag[rxcp->vlan_tag])
+ !test_bit(rxcp->vlan_tag, adapter->vids))
rxcp->vlanf = 0;
}
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 last_index)
+ struct be_tx_obj *txo, u16 last_index)
{
struct be_queue_info *txq = &txo->q;
struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
+ sizeof(struct be_eq_entry));
if (rc)
return rc;
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- sizeof(struct be_mcc_compl)))
+ sizeof(struct be_mcc_compl)))
goto err;
/* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
rxo->adapter = adapter;
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
+ sizeof(struct be_eth_rx_compl));
if (rc)
return rc;
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
* promiscuous mode on some SKUs
*/
if (unlikely(rxcp->port != adapter->port_num &&
- !lancer_chip(adapter))) {
+ !lancer_chip(adapter))) {
be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
if (!txcp)
break;
num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp));
+ AMAP_GET_BITS(struct
+ amap_eth_tx_compl,
+ wrb_index, txcp));
}
if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
/* As Tx wrbs have been freed up, wake up netdev queue
* if it was stopped due to lack of tx wrbs. */
if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, idx);
}
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
+ SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ SLIPORT_ERROR2_OFFSET);
adapter->hw_error = true;
/* Do not log error messages if it's a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
}
} else {
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
+ PCICFG_UE_STATUS_LOW, &ue_lo);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
+ PCICFG_UE_STATUS_HIGH, &ue_hi);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2662,7 @@ fail:
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eqo)
+ struct be_eq_obj *eqo)
{
return adapter->msix_entries[eqo->msix_idx].vector;
}
@@ -2648,7 +2686,7 @@ err_msix:
for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
- status);
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2774,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rsstable[128];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+ struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN;
+ j += adapter->num_rx_qs - 1) {
for_all_rss_queues(adapter, rxo, i) {
- if ((j + i) >= 128)
+ if ((j + i) >= RSS_INDIR_TABLE_LEN)
break;
- rsstable[j + i] = rxo->rss_id;
+ rss->rsstable[j + i] = rxo->rss_id;
+ rss->rss_queue[j + i] = i;
}
}
- adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
if (!BEx_chip(adapter))
- adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
- RSS_ENABLE_UDP_IPV6;
+ rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
} else {
/* Disable RSS, if only default RX Q is created */
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
}
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
+ get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss_hkey);
if (rc) {
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
+ memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
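The RSS rework above keeps the indirection table and a freshly randomized hash key in adapter->rss_info so they can be reported later. The fill distributes 128 table slots round-robin over the RSS-capable RX queues; a standalone equivalent, assuming queue ids 0..N-1 (the driver's nested loop generalizes this to arbitrary rss_id values):

	#include <stdio.h>

	#define RSS_INDIR_TABLE_LEN 128

	int main(void)
	{
		unsigned char rsstable[RSS_INDIR_TABLE_LEN];
		int num_rss_qs = 4;	/* e.g. num_rx_qs - 1, default queue excluded */
		int i;

		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
			rsstable[i] = i % num_rss_qs;	/* queue for this hash slot */

		/* an RX hash indexes this table: queue = rsstable[hash % 128] */
		printf("slot 0 -> q%u, slot 5 -> q%u\n",
		       (unsigned)rsstable[0], (unsigned)rsstable[5]);
		return 0;
	}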
@@ -2896,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (enable) {
status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ PCICFG_PM_CONTROL_OFFSET,
+ PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
@@ -2905,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr, &cmd);
+ adapter->netdev->dev_addr,
+ &cmd);
pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
@@ -2944,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address assignment failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n",
+ vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3086,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+ status =
+ be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -3119,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
u32 privileges;
- u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3175,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
vf);
}
- /* BE3 FW, by default, caps VF TX-rate to 100mbps.
- * Allow full available bandwidth
- */
- if (BE3_chip(adapter) && !old_vfs)
- be_cmd_config_qos(adapter, 1000, vf + 1);
-
- status = be_cmd_link_status_query(adapter, &lnk_speed,
- NULL, vf + 1);
- if (!status)
- vf_cfg->tx_rate = lnk_speed;
+ /* Allow full available bandwidth */
+ if (!old_vfs)
+ be_cmd_config_qos(adapter, 0, 0, vf + 1);
if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
@@ -3590,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
}
#endif
-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
-
-static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p, u32 img_start, int image_size,
- int hdr_size)
-{
- u32 crc_offset;
- u8 flashed_crc[4];
- int status;
-
- crc_offset = hdr_size + img_start + image_size - 4;
-
- p += crc_offset;
-
- status = be_cmd_get_flash_crc(adapter, flashed_crc,
- (image_size - 4));
- if (status) {
- dev_err(&adapter->pdev->dev,
- "could not get crc from flash, not flashing redboot\n");
- return false;
- }
-
- /*update redboot only if crc does not match*/
- if (!memcmp(flashed_crc, p, 4))
- return false;
- else
- return true;
-}
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3649,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- int header_size,
- const struct firmware *fw)
+ int header_size,
+ const struct firmware *fw)
{
struct flash_section_info *fsec = NULL;
const u8 *p = fw->data;
@@ -3665,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
return NULL;
}
+static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
+ u32 img_offset, u32 img_size, int hdr_size,
+ u16 img_optype, bool *crc_match)
+{
+ u32 crc_offset;
+ int status;
+ u8 crc[4];
+
+ status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
+ if (status)
+ return status;
+
+ crc_offset = hdr_size + img_offset + img_size - 4;
+
+ /* Skip flashing if the CRC of the flashed region matches */
+ if (!memcmp(crc, p + crc_offset, 4))
+ *crc_match = true;
+ else
+ *crc_match = false;
+
+ return status;
+}
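be_check_flash_crc() factors out the skip-if-unchanged test formerly open-coded in be_flash_redboot(): each image section ends in a 4-byte CRC, and if the CRC already on flash equals the one in the new image, the section is unchanged and need not be rewritten. A standalone sketch with a stubbed CRC read (the real driver issues be_cmd_get_flash_crc()):

	#include <stdbool.h>
	#include <string.h>

	/* stub standing in for be_cmd_get_flash_crc(): pretend flash holds zeros */
	static int read_flash_crc(unsigned char crc_out[4], int region)
	{
		(void)region;
		memset(crc_out, 0, 4);
		return 0;
	}

	static int crc_matches(const unsigned char *img, unsigned int img_size,
			       int region, bool *match)
	{
		unsigned char flashed[4];
		int err = read_flash_crc(flashed, region);

		if (err)
			return err;
		/* the new image carries its CRC in the trailing 4 bytes */
		*match = memcmp(flashed, img + img_size - 4, 4) == 0;
		return 0;
	}

	int main(void)
	{
		const unsigned char img[8] = { 0 };	/* toy image, CRC = last 4 */
		bool match;

		if (!crc_matches(img, sizeof(img), 0, &match))
			return !match;	/* returns 0: stub CRC matches image CRC */
		return 1;
	}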
+
static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size)
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
{
- u32 total_bytes = 0, flash_op, num_bytes = 0;
- int status = 0;
struct be_cmd_write_flashrom *req = flash_cmd->va;
+ u32 total_bytes, flash_op, num_bytes;
+ int status;
total_bytes = img_size;
while (total_bytes) {
@@ -3693,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
memcpy(req->data_buf, img, num_bytes);
img += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, num_bytes);
- if (status) {
- if (status == ILLEGAL_IOCTL_REQ &&
- optype == OPTYPE_PHY_FW)
- break;
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed.\n");
+ flash_op, num_bytes);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ else if (status)
return status;
- }
}
return 0;
}
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd,
- int num_of_images)
-
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
- const u8 *p = fw->data;
- const struct flash_comp *pflashcomp;
- int num_comp, redboot;
+ struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
+ int status, i, filehdr_size, num_comp;
+ const struct flash_comp *pflashcomp;
+ bool crc_match;
+ const u8 *p;
struct flash_comp gen3_flash_types[] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3775,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
/* Get flash section info*/
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
- dev_err(&adapter->pdev->dev,
- "Invalid Cookie. UFI corrupted ?\n");
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
return -1;
}
for (i = 0; i < num_comp; i++) {
@@ -3792,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
continue;
if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
- redboot = be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size + img_hdrs_size);
- if (!redboot)
+ status = be_check_flash_crc(adapter, fw->data,
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size,
+ OPTYPE_REDBOOT, &crc_match);
+ if (status) {
+ dev_err(dev,
+ "Could not get CRC for 0x%x region\n",
+ pflashcomp[i].optype);
+ continue;
+ }
+
+ if (crc_match)
continue;
}
- p = fw->data;
- p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
+ p = fw->data + filehdr_size + pflashcomp[i].offset +
+ img_hdrs_size;
if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size);
+ pflashcomp[i].size);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Flashing section type %d failed.\n",
+ dev_err(dev, "Flashing section type 0x%x failed\n",
pflashcomp[i].img_type);
return status;
}
@@ -3816,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
return 0;
}
+static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
+{
+ u32 img_type = le32_to_cpu(fsec_entry.type);
+ u16 img_optype = le16_to_cpu(fsec_entry.optype);
+
+ if (img_optype != 0xFFFF)
+ return img_optype;
+
+ switch (img_type) {
+ case IMAGE_FIRMWARE_iSCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCoE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_iSCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ case IMAGE_FLASHISM_JUMPVECTOR:
+ img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
+ break;
+ case IMAGE_FIRMWARE_PHY:
+ img_optype = OPTYPE_SH_PHY_FW;
+ break;
+ case IMAGE_REDBOOT_DIR:
+ img_optype = OPTYPE_REDBOOT_DIR;
+ break;
+ case IMAGE_REDBOOT_CONFIG:
+ img_optype = OPTYPE_REDBOOT_CONFIG;
+ break;
+ case IMAGE_UFI_DIR:
+ img_optype = OPTYPE_UFI_DIR;
+ break;
+ default:
+ break;
+ }
+
+ return img_optype;
+}
+
static int be_flash_skyhawk(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status = 0, i, filehdr_size = 0;
- int img_offset, img_size, img_optype, redboot;
int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
- const u8 *p = fw->data;
+ struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
+ u32 img_offset, img_size, img_type;
+ int status, i, filehdr_size;
+ bool crc_match, old_fw_img;
+ u16 img_optype;
+ const u8 *p;
filehdr_size = sizeof(struct flash_file_hdr_g3);
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
- dev_err(&adapter->pdev->dev,
- "Invalid Cookie. UFI corrupted ?\n");
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
return -1;
}
for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+ img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+ old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
- switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
- case IMAGE_FIRMWARE_iSCSI:
- img_optype = OPTYPE_ISCSI_ACTIVE;
- break;
- case IMAGE_BOOT_CODE:
- img_optype = OPTYPE_REDBOOT;
- break;
- case IMAGE_OPTION_ROM_ISCSI:
- img_optype = OPTYPE_BIOS;
- break;
- case IMAGE_OPTION_ROM_PXE:
- img_optype = OPTYPE_PXE_BIOS;
- break;
- case IMAGE_OPTION_ROM_FCoE:
- img_optype = OPTYPE_FCOE_BIOS;
- break;
- case IMAGE_FIRMWARE_BACKUP_iSCSI:
- img_optype = OPTYPE_ISCSI_BACKUP;
- break;
- case IMAGE_NCSI:
- img_optype = OPTYPE_NCSI_FW;
- break;
- default:
+ if (img_optype == 0xFFFF)
continue;
+ /* Don't bother verifying CRC if an old FW image is being
+ * flashed
+ */
+ if (old_fw_img)
+ goto flash;
+
+ status = be_check_flash_crc(adapter, fw->data, img_offset,
+ img_size, filehdr_size +
+ img_hdrs_size, img_optype,
+ &crc_match);
+ /* The current FW image on the card does not recognize the new
+ * FLASH op_type. The FW download is partially complete.
+ * Reboot the server now so that the FW can recognize the new
+ * FLASH op_type. To complete the remaining process,
+ * download the same FW again after the reboot.
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+ dev_err(dev, "Flash incomplete. Reset the server\n");
+ dev_err(dev, "Download FW image again after reset\n");
+ return -EAGAIN;
+ } else if (status) {
+ dev_err(dev, "Could not get CRC for 0x%x region\n",
+ img_optype);
+ return -EFAULT;
}
- if (img_optype == OPTYPE_REDBOOT) {
- redboot = be_flash_redboot(adapter, fw->data,
- img_offset, img_size,
- filehdr_size + img_hdrs_size);
- if (!redboot)
- continue;
- }
+ if (crc_match)
+ continue;
- p = fw->data;
- p += filehdr_size + img_offset + img_hdrs_size;
+flash:
+ p = fw->data + filehdr_size + img_offset + img_hdrs_size;
if (p + img_size > fw->data + fw->size)
return -1;
status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Flashing section type %d failed.\n",
- fsec->fsec_entry[i].type);
- return status;
+ /* For old FW images ignore ILLEGAL_FIELD error or errors on
+ * UFI_DIR region
+ */
+ if (old_fw_img &&
+ (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+ (img_optype == OPTYPE_UFI_DIR &&
+ base_status(status) == MCC_STATUS_FAILED))) {
+ continue;
+ } else if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ img_type);
+ return -EFAULT;
}
}
return 0;
}
static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
+ const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev,
"Firmware load error. "
@@ -3976,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
} else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_err(&adapter->pdev->dev,
- "System reboot required for new FW"
- " to be active\n");
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW to be active\n");
}
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
switch (ufi_type) {
case UFI_TYPE4:
status = be_flash_skyhawk(adapter, fw,
- &flash_cmd, num_imgs);
+ &flash_cmd, num_imgs);
break;
case UFI_TYPE3R:
status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4213,7 @@ fw_exit:
return status;
}
-static int be_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -4155,8 +4255,7 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask)
+ struct net_device *dev, u32 filter_mask)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4254,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
- .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
+ .ndo_set_vf_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4301,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+ netdev->ethtool_ops = &be_ethtool_ops;
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7e342b..f3658bdb64cc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phy, ifr, cmd);
}
-static int ethoc_config(struct net_device *dev, struct ifmap *map)
-{
- return -ENOSYS;
-}
-
static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
.ndo_do_ioctl = ethoc_ioctl,
- .ndo_set_config = ethoc_config,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 68069eabc4f8..c77fa4a69844 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+ netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8be5b40c0a12..4ff1adc6bfca 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
}
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+ netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
platform_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6048dc8604ee..270308315d43 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -67,6 +67,7 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
depends on FSL_SOC
select PHYLIB
+ select OF_MDIO
---help---
This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d19ff05..671d080105a7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
#define BD_ENET_TX_RCMASK ((ushort)0x003c)
#define BD_ENET_TX_UN ((ushort)0x0002)
#define BD_ENET_TX_CSL ((ushort)0x0001)
-#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
/*enhanced buffer descriptor control/status used by Ethernet transmit*/
#define BD_ENET_TX_INT 0x40000000
@@ -246,8 +246,8 @@ struct bufdesc_ex {
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 16 /* Must be power of two */
-#define TX_RING_MOD_MASK 15 /* for this to work */
+#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_MOD_MASK 511 /* for this to work */
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
@@ -296,8 +296,15 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
+ unsigned short bufdesc_size;
unsigned short tx_ring_size;
unsigned short rx_ring_size;
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ /* Software TSO */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8d69e439f0c5..38d9d276ab8b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -36,6 +36,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
@@ -54,6 +55,7 @@
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/cacheflush.h>
@@ -172,10 +174,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
-#error "FEC: descriptor ring size constants too large"
-#endif
-
/* Interrupt events/masks. */
#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
@@ -231,6 +229,15 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define FEC_MAX_TSO_SEGS 100
+#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
static int mii_cnt;
static inline
@@ -286,6 +293,22 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
+static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
+ struct fec_enet_private *fep)
+{
+ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+}
+
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+{
+ int entries;
+
+ entries = ((const char *)fep->dirty_tx -
+ (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+
+ return entries > 0 ? entries : entries + fep->tx_ring_size;
+}
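fec_enet_get_free_txdesc_num() above derives free ring slots from the producer (cur_tx) and consumer (dirty_tx) positions; the byte-pointer difference divided by bufdesc_size is ordinary ring-index arithmetic, with one slot kept unusable so that a full ring is distinguishable from an empty one. The same math on plain indices:

	#include <stdio.h>

	static int free_txdesc(int dirty_tx, int cur_tx, int ring_size)
	{
		int entries = dirty_tx - cur_tx - 1;

		return entries > 0 ? entries : entries + ring_size;
	}

	int main(void)
	{
		/* ring of 8: producer at 5, consumer at 2 -> 4 free slots */
		printf("%d\n", free_txdesc(2, 5, 8));
		return 0;
	}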
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -307,33 +330,133 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(skb_cow_head(skb, 0)))
return -1;
+ ip_hdr(skb)->check = 0;
*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
return 0;
}
-static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void
+fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
+{
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *bdp_pre;
+
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+ if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+ !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+ fep->delay_work.trig_tx = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work),
+ msecs_to_jiffies(1));
+ }
+}
+
+static int
+fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp, *bdp_pre;
- void *bufaddr;
- unsigned short status;
+ struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc_ex *ebdp;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int frag, frag_len;
+ unsigned short status;
+ unsigned int estatus = 0;
+ skb_frag_t *this_frag;
unsigned int index;
+ void *bufaddr;
+ int i;
- /* Fill in a Tx ring entry */
+ for (frag = 0; frag < nr_frags; frag++) {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ frag_len = skb_shinfo(skb)->frags[frag].size;
+
+ /* Handle the last BD specially */
+ if (frag == nr_frags - 1) {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus |= BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, frag_len);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+ frag_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ goto dma_mapping_error;
+ }
+
+ bdp->cbd_datlen = frag_len;
+ bdp->cbd_sc = status;
+ }
+
+ fep->cur_tx = bdp;
+
+ return 0;
+
+dma_mapping_error:
bdp = fep->cur_tx;
+ for (i = 0; i < frag; i++) {
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ }
+ return NETDEV_TX_OK;
+}
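Each TX submit path above copies the payload into a driver-owned bounce buffer when it is not aligned to FEC_ALIGNMENT, or when FEC_QUIRK_SWAP_FRAME requires byte-swapping before DMA. A standalone sketch of that decision; ALIGNMENT_MASK and the buffer are illustrative stand-ins:

	#include <stdint.h>
	#include <string.h>

	#define ALIGNMENT_MASK 0x3	/* engine wants 4-byte aligned buffers */

	static unsigned char bounce[2048] __attribute__((aligned(4)));

	static void *dma_ready_buf(void *buf, size_t len, int need_swap)
	{
		if (((uintptr_t)buf & ALIGNMENT_MASK) || need_swap) {
			memcpy(bounce, buf, len);
			/* a byte-swap would happen here on quirky controllers */
			return bounce;
		}
		return buf;	/* already usable as-is */
	}

	int main(void)
	{
		char msg[16] = "hello";
		void *p = dma_ready_buf(msg, 5, 1);	/* quirk forces the bounce */

		return p == bounce ? 0 : 1;
	}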
- status = bdp->cbd_sc;
+static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct bufdesc *bdp, *last_bdp;
+ void *bufaddr;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned int estatus = 0;
+ unsigned int index;
+ int entries_free;
+ int ret;
- if (status & BD_ENET_TX_READY) {
- /* Ooops. All transmit buffers are full. Bail out.
- * This should not happen, since ndev->tbusy should be set.
- */
- netdev_err(ndev, "tx queue full!\n");
- return NETDEV_TX_BUSY;
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for SG!\n");
+ return NETDEV_TX_OK;
}
/* Protocol checksum off-load for TCP and UDP. */
@@ -342,102 +465,300 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
- /* Clear all of the status flags */
+ /* Fill in a Tx ring entry */
+ bdp = fep->cur_tx;
+ status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
bufaddr = skb->data;
- bdp->cbd_datlen = skb->len;
-
- /*
- * On some FEC implementations data must be aligned on
- * 4-byte boundaries. Use bounce buffers to copy data
- * and get it aligned. Ugh.
- */
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
+ buflen = skb_headlen(skb);
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
- memcpy(fep->tx_bounce[index], skb->data, skb->len);
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], skb->data, buflen);
bufaddr = fep->tx_bounce[index];
- }
- /*
- * Some design made an incorrect assumption on endian mode of
- * the system that it's running on. As the result, driver has to
- * swap every frame going to and coming from the controller.
- */
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
- swap_buffer(bufaddr, skb->len);
-
- /* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, buflen);
+ }
/* Push the data cache so the CPM does not get stale memory
* data.
*/
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
- skb->len, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
- bdp->cbd_bufaddr = 0;
- fep->tx_skbuff[index] = NULL;
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
+ if (nr_frags) {
+ ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+ if (ret)
+ return ret;
+ } else {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus = BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_bdu = 0;
+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- fep->hwts_tx_en)) {
- ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ fep->hwts_tx_en))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- ebdp->cbd_esc = BD_ENET_TX_INT;
- /* Enable protocol checksum flags
- * We do not bother with the IP Checksum bits as they
- * are done by the kernel
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- ebdp->cbd_esc |= BD_ENET_TX_PINS;
- }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
}
+ last_bdp = fep->cur_tx;
+ index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+ /* Save skb pointer */
+ fep->tx_skbuff[index] = skb;
+
+ bdp->cbd_datlen = buflen;
+
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- bdp_pre = fec_enet_get_prevdesc(bdp, fep);
- if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
- !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
- fep->delay_work.trig_tx = true;
- schedule_delayed_work(&(fep->delay_work.delay_work),
- msecs_to_jiffies(1));
- }
+ fec_enet_submit_work(bdp, fep);
/* If this was the last BD in the ring, start at the beginning again. */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(last_bdp, fep);
skb_tx_timestamp(skb);
fep->cur_tx = bdp;
- if (fep->cur_tx == fep->dirty_tx)
- netif_stop_queue(ndev);
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ bdp->cbd_datlen = size;
+
+ if (((unsigned long) data) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], data, size);
+ data = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, size);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ /* Handle the last BD specially */
+ if (last_tcp)
+ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ if (is_last) {
+ status |= BD_ENET_TX_INTR;
+ if (fep->bufdesc_ex)
+ ebdp->cbd_esc |= BD_ENET_TX_INT;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ void *bufaddr;
+ unsigned long dmabuf;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, hdr_len);
+
+ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
+ hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ bdp->cbd_bufaddr = dmabuf;
+ bdp->cbd_datlen = hdr_len;
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int total_len, data_left;
+ struct bufdesc *bdp = fep->cur_tx;
+ struct tso_t tso;
+ unsigned int index = 0;
+ int ret;
+
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for TSO!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+ if (ret)
+ goto err_release;
+
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
+ size, size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ }
+
+ /* Save skb pointer */
+ fep->tx_skbuff[index] = skb;
+
+ fec_enet_submit_work(bdp, fep);
+
+ skb_tx_timestamp(skb);
+ fep->cur_tx = bdp;
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ return 0;
+
+err_release:
+ /* TODO: Release all used data descriptors for TSO */
+ return ret;
+}
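fec_enet_txq_submit_tso() drives the generic software-TSO helpers from net/tso.h: for every gso_size-sized segment it emits one descriptor with a header rebuilt by tso_build_hdr(), then descriptors for that segment's payload chunks via tso_build_data(). A standalone skeleton of that two-level loop, with printouts standing in for the descriptor writes:

	#include <stdio.h>

	static void emit_hdr_desc(int seg)	/* tso_build_hdr() equivalent */
	{
		printf("seg %d: header desc\n", seg);
	}

	static void emit_data_desc(int seg, int len)	/* tso_build_data() */
	{
		printf("seg %d: data desc, %d bytes\n", seg, len);
	}

	static void submit_tso(int payload_len, int gso_size, int frag_size)
	{
		int seg = 0;

		while (payload_len > 0) {
			/* each segment carries at most gso_size payload bytes */
			int data_left = gso_size < payload_len ? gso_size
							       : payload_len;

			payload_len -= data_left;
			emit_hdr_desc(seg);

			while (data_left > 0) {	/* payload may span several frags */
				int size = frag_size < data_left ? frag_size
								 : data_left;

				emit_data_desc(seg, size);
				data_left -= size;
			}
			seg++;
		}
	}

	int main(void)
	{
		submit_tso(3000, 1448, 1024);	/* 3000B payload, MSS 1448 */
		return 0;
	}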
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int entries_free;
+ int ret;
+
+ if (skb_is_gso(skb))
+ ret = fec_enet_txq_submit_tso(skb, ndev);
+ else
+ ret = fec_enet_txq_submit_skb(skb, ndev);
+ if (ret)
+ return ret;
+
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free <= fep->tx_stop_threshold)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
}
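fec_enet_start_xmit() now stops the queue once free descriptors fall to tx_stop_threshold (the worst case one skb can need), and fec_enet_tx() wakes it once at least tx_wake_threshold are free again. A standalone sketch of the stop/wake pair; the threshold values here are illustrative, the driver derives its own from FEC_MAX_SKB_DESCS and the ring size:

	#include <stdbool.h>
	#include <stdio.h>

	static bool queue_stopped;

	static void xmit_check(int entries_free, int stop_threshold)
	{
		if (entries_free <= stop_threshold) {
			queue_stopped = true;	/* netif_stop_queue() equivalent */
			printf("queue stopped (%d free)\n", entries_free);
		}
	}

	static void tx_complete_check(int entries_free, int wake_threshold)
	{
		if (queue_stopped && entries_free >= wake_threshold) {
			queue_stopped = false;	/* netif_wake_queue() equivalent */
			printf("queue woken (%d free)\n", entries_free);
		}
	}

	int main(void)
	{
		xmit_check(20, 32);		/* below stop threshold: stop */
		tx_complete_check(40, 64);	/* not enough free: stay stopped */
		tx_complete_check(80, 64);	/* enough free again: wake */
		return 0;
	}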
@@ -756,6 +1077,7 @@ fec_enet_tx(struct net_device *ndev)
unsigned short status;
struct sk_buff *skb;
int index = 0;
+ int entries_free;
fep = netdev_priv(ndev);
bdp = fep->dirty_tx;
@@ -769,16 +1091,17 @@ fec_enet_tx(struct net_device *ndev)
if (bdp == fep->cur_tx)
break;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
skb = fep->tx_skbuff[index];
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
- DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
+ if (!skb) {
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ continue;
+ }
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -797,7 +1120,7 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += bdp->cbd_datlen;
+ ndev->stats.tx_bytes += skb->len;
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -834,15 +1157,15 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (fep->dirty_tx != fep->cur_tx) {
- if (netif_queue_stopped(ndev))
+ if (netif_queue_stopped(ndev)) {
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free >= fep->tx_wake_threshold)
netif_wake_queue(ndev);
}
}
return;
}
-
/* During a receive, the cur_rx points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -920,11 +1243,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->rx_bd_base;
- else
- index = bdp - fep->rx_bd_base;
+ index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
data = fep->rx_skbuff[index]->data;
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1255,6 +1574,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
+ } else {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+ }
+
+ return 0;
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+
+ return ret;
+}
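fec_enet_clk_enable() consolidates the clock bring-up previously duplicated across probe/remove/suspend/resume, using the kernel's usual goto-unwind idiom: acquire resources in order, and on failure release only what was already acquired, in reverse order. A standalone sketch of the idiom with stand-in enable/disable functions:

	#include <stdio.h>

	static int enable(const char *name)
	{
		printf("enable %s\n", name);
		return 0;	/* stand-in: real clk_prepare_enable() can fail */
	}

	static void disable(const char *name)
	{
		printf("disable %s\n", name);
	}

	static int clks_enable(void)
	{
		int ret;

		ret = enable("ahb");
		if (ret)
			return ret;
		ret = enable("ipg");
		if (ret)
			goto failed_ipg;
		ret = enable("ptp");
		if (ret)
			goto failed_ptp;
		return 0;

	failed_ptp:
		disable("ipg");
	failed_ipg:
		disable("ahb");
		return ret;
	}

	int main(void)
	{
		return clks_enable();
	}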
+
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1364,7 +1726,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, which is fixed in the i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
@@ -1773,6 +2135,11 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ return ret;
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1811,6 +2178,8 @@ fec_enet_close(struct net_device *ndev)
phy_disconnect(fep->phy_dev);
}
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
fec_enet_free_buffers(ndev);
return 0;
@@ -1988,13 +2357,35 @@ static int fec_enet_init(struct net_device *ndev)
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *cbd_base;
+ int bd_size;
+
+ /* init the tx & rx ring size */
+ fep->tx_ring_size = TX_RING_SIZE;
+ fep->rx_ring_size = RX_RING_SIZE;
+
+ fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
+
+ if (fep->bufdesc_ex)
+ fep->bufdesc_size = sizeof(struct bufdesc_ex);
+ else
+ fep->bufdesc_size = sizeof(struct bufdesc);
+ bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
+ fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+ cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
GFP_KERNEL);
if (!cbd_base)
return -ENOMEM;
+ fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
+ &fep->tso_hdrs_dma, GFP_KERNEL);
+ if (!fep->tso_hdrs) {
+ dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
+ return -ENOMEM;
+ }
+
memset(cbd_base, 0, PAGE_SIZE);
fep->netdev = ndev;
@@ -2004,10 +2395,6 @@ static int fec_enet_init(struct net_device *ndev)
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
- /* init the tx & rx ring size */
- fep->tx_ring_size = TX_RING_SIZE;
- fep->rx_ring_size = RX_RING_SIZE;
-
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
if (fep->bufdesc_ex)
@@ -2024,21 +2411,21 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
+ if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+ ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
- ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
+ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
+ ndev->hw_features = ndev->features;
+
fec_restart(ndev, 0);
return 0;
@@ -2114,6 +2501,9 @@ fec_probe(struct platform_device *pdev)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fep->hwp = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2554,10 @@ fec_probe(struct platform_device *pdev)
fep->bufdesc_ex = 0;
}
- ret = clk_prepare_enable(fep->clk_ahb);
+ ret = fec_enet_clk_enable(ndev, true);
if (ret)
goto failed_clk;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
-
- if (fep->clk_ptp) {
- ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
- goto failed_clk_ptp;
- }
-
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2599,8 @@ fec_probe(struct platform_device *pdev)
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
ret = register_netdev(ndev);
if (ret)
@@ -2244,15 +2620,7 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
-failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
failed_clk:
failed_ioremap:
free_netdev(ndev);
@@ -2272,14 +2640,9 @@ fec_drv_remove(struct platform_device *pdev)
del_timer_sync(&fep->time_keep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
free_netdev(ndev);
return 0;
@@ -2296,12 +2659,8 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -2322,25 +2681,10 @@ fec_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fep->clk_ahb);
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
if (ret)
- goto failed_clk_ahb;
-
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
-
- if (fep->clk_ptp) {
- ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
- goto failed_clk_ptp;
- }
+ goto failed_clk;
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2693,7 @@ fec_resume(struct device *dev)
return 0;
-failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
- clk_disable_unprepare(fep->clk_ahb);
-failed_clk_ahb:
+failed_clk:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
return ret;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc80db41d6b3..cfaf17b70f3f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -792,10 +792,6 @@ static int fs_init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
iface);
if (!phydev) {
- phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
- iface);
- }
- if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
}
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->use_napi = 1;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
- if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
- NULL)))
- goto out_free_fpi;
+ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(ofdev->dev.of_node);
+ if (err)
+ goto out_free_fpi;
+
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ fpi->phy_node = ofdev->dev.of_node;
+ }
if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
phy_connection_type = of_get_property(ofdev->dev.of_node,
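
fs_enet, gianfar, and ucc_geth all adopt the same fixed-link idiom in this series: when there is no phy-handle, register a fixed PHY from the MAC's own DT node and then connect to it as usual. A condensed sketch of the shared pattern; the my_* names are placeholders, not taken from any of the three drivers:

#include <linux/of_mdio.h>
#include <linux/phy.h>

static void my_adjust_link(struct net_device *ndev);	/* driver callback */

static int my_attach_phy(struct net_device *ndev, struct device_node *np,
			 phy_interface_t iface)
{
	struct device_node *phy_node;
	struct phy_device *phydev;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		int err = of_phy_register_fixed_link(np);

		if (err)
			return err;
		/* for a fixed link, the MAC node itself describes the PHY */
		phy_node = np;
	}

	phydev = of_phy_connect(ndev, phy_node, my_adjust_link, 0, iface);
	return phydev ? 0 : -ENODEV;
}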
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ee6ddbd4f252..a6cf40e62f3a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -889,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ goto err_grp_init;
+
+ priv->phy_node = np;
+ }
+
/* Find the TBI PHY. If it's not there, we don't support SGMII */
priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
@@ -1231,7 +1242,7 @@ static void gfar_hw_init(struct gfar_private *priv)
gfar_write_isrg(priv);
}
-static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -1373,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_hw_init(priv);
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
@@ -1380,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
-
device_init_wakeup(&dev->dev,
priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1660,9 +1671,6 @@ static int init_phy(struct net_device *dev)
priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
interface);
- if (!priv->phydev)
- priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
- interface);
if (!priv->phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c8299c31b21f..fab39e295441 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
priv->phy_interface);
- if (!phydev)
- phydev = of_phy_connect_fixed_link(dev, &adjust_link,
- priv->phy_interface);
if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!ug_info->phy_node) {
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ return err;
+ }
+ ug_info->phy_node = np;
+ }
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 413329eff2ff..cc83350d56ba 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
void uec_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+ netdev->ethtool_ops = &uec_ethtool_ops;
}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d449fcb90199..0c9d55c862ae 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -162,7 +162,9 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* Return all Fs if nothing was there */
if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
- dev_err(&bus->dev, "MDIO read error\n");
+ dev_err(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
return 0xffff;
}
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7becab1aa3e4..cfe7a7431730 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
dev->netdev_ops = &fjn_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
return fmvj18x_config(link);
} /* fmvj18x_attach */
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 000000000000..e9421731b05e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,27 @@
+#
+# HISILICON device configuration
+#
+
+config NET_VENDOR_HISILICON
+ bool "Hisilicon devices"
+ default y
+ depends on ARM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Hisilicon devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_HISILICON
+
+config HIX5HD2_GMAC
+ tristate "Hisilicon HIX5HD2 Family Network Device Support"
+ select PHYLIB
+ help
+ This selects the Hisilicon hix5hd2 MAC family network device support.
+
+endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 000000000000..9175e84622d4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 000000000000..0ffdcd381fdd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1066 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/clk.h>
+#include <linux/circ_buf.h>
+
+#define STATION_ADDR_LOW 0x0000
+#define STATION_ADDR_HIGH 0x0004
+#define MAC_DUPLEX_HALF_CTRL 0x0008
+#define MAX_FRM_SIZE 0x003c
+#define PORT_MODE 0x0040
+#define PORT_EN 0x0044
+#define BITS_TX_EN BIT(2)
+#define BITS_RX_EN BIT(1)
+#define REC_FILT_CONTROL 0x0064
+#define BIT_CRC_ERR_PASS BIT(5)
+#define BIT_PAUSE_FRM_PASS BIT(4)
+#define BIT_VLAN_DROP_EN BIT(3)
+#define BIT_BC_DROP_EN BIT(2)
+#define BIT_MC_MATCH_EN BIT(1)
+#define BIT_UC_MATCH_EN BIT(0)
+#define PORT_MC_ADDR_LOW 0x0068
+#define PORT_MC_ADDR_HIGH 0x006C
+#define CF_CRC_STRIP 0x01b0
+#define MODE_CHANGE_EN 0x01b4
+#define BIT_MODE_CHANGE_EN BIT(0)
+#define COL_SLOT_TIME 0x01c0
+#define RECV_CONTROL 0x01e0
+#define BIT_STRIP_PAD_EN BIT(3)
+#define BIT_RUNT_PKT_EN BIT(4)
+#define CONTROL_WORD 0x0214
+#define MDIO_SINGLE_CMD 0x03c0
+#define MDIO_SINGLE_DATA 0x03c4
+#define MDIO_CTRL 0x03cc
+#define MDIO_RDATA_STATUS 0x03d0
+
+#define MDIO_START BIT(20)
+#define MDIO_R_VALID BIT(0)
+#define MDIO_READ (BIT(17) | MDIO_START)
+#define MDIO_WRITE (BIT(16) | MDIO_START)
+
+#define RX_FQ_START_ADDR 0x0500
+#define RX_FQ_DEPTH 0x0504
+#define RX_FQ_WR_ADDR 0x0508
+#define RX_FQ_RD_ADDR 0x050c
+#define RX_FQ_VLDDESC_CNT 0x0510
+#define RX_FQ_ALEMPTY_TH 0x0514
+#define RX_FQ_REG_EN 0x0518
+#define BITS_RX_FQ_START_ADDR_EN BIT(2)
+#define BITS_RX_FQ_DEPTH_EN BIT(1)
+#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
+#define RX_FQ_ALFULL_TH 0x051c
+#define RX_BQ_START_ADDR 0x0520
+#define RX_BQ_DEPTH 0x0524
+#define RX_BQ_WR_ADDR 0x0528
+#define RX_BQ_RD_ADDR 0x052c
+#define RX_BQ_FREE_DESC_CNT 0x0530
+#define RX_BQ_ALEMPTY_TH 0x0534
+#define RX_BQ_REG_EN 0x0538
+#define BITS_RX_BQ_START_ADDR_EN BIT(2)
+#define BITS_RX_BQ_DEPTH_EN BIT(1)
+#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
+#define RX_BQ_ALFULL_TH 0x053c
+#define TX_BQ_START_ADDR 0x0580
+#define TX_BQ_DEPTH 0x0584
+#define TX_BQ_WR_ADDR 0x0588
+#define TX_BQ_RD_ADDR 0x058c
+#define TX_BQ_VLDDESC_CNT 0x0590
+#define TX_BQ_ALEMPTY_TH 0x0594
+#define TX_BQ_REG_EN 0x0598
+#define BITS_TX_BQ_START_ADDR_EN BIT(2)
+#define BITS_TX_BQ_DEPTH_EN BIT(1)
+#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
+#define TX_BQ_ALFULL_TH 0x059c
+#define TX_RQ_START_ADDR 0x05a0
+#define TX_RQ_DEPTH 0x05a4
+#define TX_RQ_WR_ADDR 0x05a8
+#define TX_RQ_RD_ADDR 0x05ac
+#define TX_RQ_FREE_DESC_CNT 0x05b0
+#define TX_RQ_ALEMPTY_TH 0x05b4
+#define TX_RQ_REG_EN 0x05b8
+#define BITS_TX_RQ_START_ADDR_EN BIT(2)
+#define BITS_TX_RQ_DEPTH_EN BIT(1)
+#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
+#define TX_RQ_ALFULL_TH 0x05bc
+#define RAW_PMU_INT 0x05c0
+#define ENA_PMU_INT 0x05c4
+#define STATUS_PMU_INT 0x05c8
+#define MAC_FIFO_ERR_IN BIT(30)
+#define TX_RQ_IN_TIMEOUT_INT BIT(29)
+#define RX_BQ_IN_TIMEOUT_INT BIT(28)
+#define TXOUTCFF_FULL_INT BIT(27)
+#define TXOUTCFF_EMPTY_INT BIT(26)
+#define TXCFF_FULL_INT BIT(25)
+#define TXCFF_EMPTY_INT BIT(24)
+#define RXOUTCFF_FULL_INT BIT(23)
+#define RXOUTCFF_EMPTY_INT BIT(22)
+#define RXCFF_FULL_INT BIT(21)
+#define RXCFF_EMPTY_INT BIT(20)
+#define TX_RQ_IN_INT BIT(19)
+#define TX_BQ_OUT_INT BIT(18)
+#define RX_BQ_IN_INT BIT(17)
+#define RX_FQ_OUT_INT BIT(16)
+#define TX_RQ_EMPTY_INT BIT(15)
+#define TX_RQ_FULL_INT BIT(14)
+#define TX_RQ_ALEMPTY_INT BIT(13)
+#define TX_RQ_ALFULL_INT BIT(12)
+#define TX_BQ_EMPTY_INT BIT(11)
+#define TX_BQ_FULL_INT BIT(10)
+#define TX_BQ_ALEMPTY_INT BIT(9)
+#define TX_BQ_ALFULL_INT BIT(8)
+#define RX_BQ_EMPTY_INT BIT(7)
+#define RX_BQ_FULL_INT BIT(6)
+#define RX_BQ_ALEMPTY_INT BIT(5)
+#define RX_BQ_ALFULL_INT BIT(4)
+#define RX_FQ_EMPTY_INT BIT(3)
+#define RX_FQ_FULL_INT BIT(2)
+#define RX_FQ_ALEMPTY_INT BIT(1)
+#define RX_FQ_ALFULL_INT BIT(0)
+
+#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
+ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
+
+#define DESC_WR_RD_ENA 0x05cc
+#define IN_QUEUE_TH 0x05d8
+#define OUT_QUEUE_TH 0x05dc
+#define QUEUE_TX_BQ_SHIFT 16
+#define RX_BQ_IN_TIMEOUT_TH 0x05e0
+#define TX_RQ_IN_TIMEOUT_TH 0x05e4
+#define STOP_CMD 0x05e8
+#define BITS_TX_STOP BIT(1)
+#define BITS_RX_STOP BIT(0)
+#define FLUSH_CMD 0x05ec
+#define BITS_TX_FLUSH_CMD BIT(5)
+#define BITS_RX_FLUSH_CMD BIT(4)
+#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
+#define BITS_TX_FLUSH_FLAG_UP BIT(2)
+#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
+#define BITS_RX_FLUSH_FLAG_UP BIT(0)
+#define RX_CFF_NUM_REG 0x05f0
+#define PMU_FSM_REG 0x05f8
+#define RX_FIFO_PKT_IN_NUM 0x05fc
+#define RX_FIFO_PKT_OUT_NUM 0x0600
+
+#define RGMII_SPEED_1000 0x2c
+#define RGMII_SPEED_100 0x2f
+#define RGMII_SPEED_10 0x2d
+#define MII_SPEED_100 0x0f
+#define MII_SPEED_10 0x0d
+#define GMAC_SPEED_1000 0x05
+#define GMAC_SPEED_100 0x01
+#define GMAC_SPEED_10 0x00
+#define GMAC_FULL_DUPLEX BIT(4)
+
+#define RX_BQ_INT_THRESHOLD 0x01
+#define TX_RQ_INT_THRESHOLD 0x01
+#define RX_BQ_IN_TIMEOUT 0x10000
+#define TX_RQ_IN_TIMEOUT 0x50000
+
+#define MAC_MAX_FRAME_SIZE 1600
+#define DESC_SIZE 32
+#define RX_DESC_NUM 1024
+#define TX_DESC_NUM 1024
+
+#define DESC_VLD_FREE 0
+#define DESC_VLD_BUSY 0x80000000
+#define DESC_FL_MID 0
+#define DESC_FL_LAST 0x20000000
+#define DESC_FL_FIRST 0x40000000
+#define DESC_FL_FULL 0x60000000
+#define DESC_DATA_LEN_OFF 16
+#define DESC_BUFF_LEN_OFF 0
+#define DESC_DATA_MASK 0x7ff
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_cnt(n) ((n) >> 5)
+#define dma_byte(n) ((n) << 5)
+
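Because each descriptor is 32 bytes and both ring sizes are powers of two, the helpers above convert between the hardware's byte offsets and descriptor indices with shifts and wrap with a mask. A userspace check of the conversions, restating the constants from above:

#include <stdio.h>
#include <stdint.h>

#define RX_DESC_NUM	1024	/* must be a power of two for the mask */

#define dma_ring_incr(n, s)	(((n) + 1) & ((s) - 1))
#define dma_cnt(n)		((n) >> 5)	/* bytes -> descriptors */
#define dma_byte(n)		((n) << 5)	/* descriptors -> bytes */

int main(void)
{
	uint32_t wr_bytes = 0x40;		/* offset as read from RX_FQ_WR_ADDR */
	uint32_t idx = dma_cnt(wr_bytes);	/* descriptor 2 */

	idx = dma_ring_incr(idx, RX_DESC_NUM);			/* 3 */
	printf("next write offset = 0x%x\n", dma_byte(idx));	/* 0x60 */
	printf("index %u wraps to %u\n", RX_DESC_NUM - 1,
	       dma_ring_incr(RX_DESC_NUM - 1, RX_DESC_NUM));	/* 0 */
	return 0;
}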
+struct hix5hd2_desc {
+ __le32 buff_addr;
+ __le32 cmd;
+} __aligned(32);
+
+struct hix5hd2_desc_sw {
+ struct hix5hd2_desc *desc;
+ dma_addr_t phys_addr;
+ unsigned int count;
+ unsigned int size;
+};
+
+#define QUEUE_NUMS 4
+struct hix5hd2_priv {
+ struct hix5hd2_desc_sw pool[QUEUE_NUMS];
+#define rx_fq pool[0]
+#define rx_bq pool[1]
+#define tx_bq pool[2]
+#define tx_rq pool[3]
+
+ void __iomem *base;
+ void __iomem *ctrl_base;
+
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ struct sk_buff *rx_skb[RX_DESC_NUM];
+
+ struct device *dev;
+ struct net_device *netdev;
+
+ struct phy_device *phy;
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct clk *clk;
+ struct mii_bus *bus;
+ struct napi_struct napi;
+ struct work_struct tx_timeout_task;
+};
+
+static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ if (speed == SPEED_1000)
+ val = RGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = RGMII_SPEED_100;
+ else
+ val = RGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(dev, "unsupported PHY interface mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+
+ if (duplex)
+ val |= GMAC_FULL_DUPLEX;
+ writel_relaxed(val, priv->ctrl_base);
+
+ writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
+ if (speed == SPEED_1000)
+ val = GMAC_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = GMAC_SPEED_100;
+ else
+ val = GMAC_SPEED_10;
+ writel_relaxed(val, priv->base + PORT_MODE);
+ writel_relaxed(0, priv->base + MODE_CHANGE_EN);
+ writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
+}
+
+static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
+{
+ writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+
+ writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
+{
+ hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
+ hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
+ hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
+ hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
+}
+
+static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
+{
+ u32 val;
+
+ /* disable and clear all interrupts */
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+ writel_relaxed(~0, priv->base + RAW_PMU_INT);
+
+ writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
+ writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
+ writel_relaxed(0, priv->base + COL_SLOT_TIME);
+
+ val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
+ writel_relaxed(val, priv->base + IN_QUEUE_TH);
+
+ writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
+ writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
+
+ hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
+ hix5hd2_set_desc_addr(priv);
+}
+
+static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
+ writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
+}
+
+static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
+}
+
+static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned char *mac = dev->dev_addr;
+ u32 val;
+
+ val = mac[1] | (mac[0] << 8);
+ writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
+
+ val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel_relaxed(val, priv->base + STATION_ADDR_LOW);
+}
+
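The station address is split across the two registers with the high-order bytes in STATION_ADDR_HIGH, so a MAC of 00:11:22:33:44:55 is written as 0x0011 and 0x22334455. A standalone check of the packing used above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t high = mac[1] | (mac[0] << 8);
	uint32_t low  = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);

	printf("STATION_ADDR_HIGH = 0x%04x\n", high);	/* 0x0011 */
	printf("STATION_ADDR_LOW  = 0x%08x\n", low);	/* 0x22334455 */
	return 0;
}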
+static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (!ret)
+ hix5hd2_hw_set_mac_addr(dev);
+
+ return ret;
+}
+
+static void hix5hd2_adjust_link(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hix5hd2_config_port(dev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ struct sk_buff *skb;
+ u32 start, end, num, pos, i;
+ u32 len = MAC_MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ /* software write pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
+ /* logic read pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
+ num = CIRC_SPACE(start, end, RX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ if (priv->rx_skb[pos]) {
+ break;
+ } else {
+ skb = netdev_alloc_skb_ip_align(priv->netdev, len);
+ if (unlikely(skb == NULL))
+ break;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ desc = priv->rx_fq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->rx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_FREE |
+ (len - 1) << DESC_BUFF_LEN_OFF);
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ /* ensure desc updated */
+ wmb();
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
+}
+
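The refill producer uses CIRC_SPACE() against the hardware read pointer to bound how many free buffers it may publish, while the receive consumer below uses CIRC_CNT() against the hardware write pointer to bound how many it may drain; both helpers assume a power-of-two ring. A minimal sketch of the two views over one head/tail pair, mirroring the definitions in <linux/circ_buf.h>:

#include <stdio.h>

/* as in <linux/circ_buf.h>; size must be a power of two */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int size = 1024, head = 10, tail = 1020;

	/* producer (refill): how many descriptors may still be queued */
	printf("space = %u\n", CIRC_SPACE(head, tail, size));	/* 1009 */
	/* consumer (rx): how many filled descriptors are pending */
	printf("count = %u\n", CIRC_CNT(head, tail, size));	/* 14 */
	return 0;
}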
+static int hix5hd2_rx(struct net_device *dev, int limit)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 start, end, num, pos, i, len;
+
+ /* software read pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
+ /* logic write pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
+ num = CIRC_CNT(end, start, RX_DESC_NUM);
+ if (num > limit)
+ num = limit;
+
+ /* ensure get updated desc */
+ rmb();
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->rx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent rx_skb\n");
+ break;
+ }
+ priv->rx_skb[pos] = NULL;
+
+ desc = priv->rx_bq.desc + pos;
+ len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
+ DESC_DATA_MASK;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ if (skb->len > MAC_MAX_FRAME_SIZE) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ dev->last_rx = jiffies;
+next:
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
+
+ hix5hd2_rx_refill(priv);
+
+ return num;
+}
+
+static void hix5hd2_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 start, end, num, pos, i;
+ dma_addr_t addr;
+
+ netif_tx_lock(dev);
+
+ /* software read */
+ start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
+ /* logic write */
+ end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
+ num = CIRC_CNT(end, start, TX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->tx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent tx_skb\n");
+ break;
+ }
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+ desc = priv->tx_rq.desc + pos;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ priv->tx_skb[pos] = NULL;
+ dev_consume_skb_any(skb);
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
+
+ netif_tx_unlock(dev);
+
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
+ netif_wake_queue(priv->netdev);
+}
+
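Reclaim also closes the byte-queue-limits loop opened in hix5hd2_net_xmit(): every netdev_sent_queue() must eventually be matched by a netdev_completed_queue(), and netdev_reset_queue() in ndo_open restarts the accounting. A hedged sketch of the three touch points, with hypothetical my_* handlers standing in for a driver's own:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... hand skb to hardware ... */
	netdev_sent_queue(dev, skb->len);	/* bytes enter the queue */
	return NETDEV_TX_OK;
}

static void my_reclaim(struct net_device *dev,
		       unsigned int pkts, unsigned int bytes)
{
	if (pkts || bytes)
		netdev_completed_queue(dev, pkts, bytes);	/* bytes leave */
}

static int my_open(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* stale counters would stall TX */
	return 0;
}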
+static int hix5hd2_poll(struct napi_struct *napi, int budget)
+{
+ struct hix5hd2_priv *priv = container_of(napi,
+ struct hix5hd2_priv, napi);
+ struct net_device *dev = priv->netdev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hix5hd2_xmit_reclaim(dev);
+ num = hix5hd2_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if ((work_done >= budget) || (num == 0))
+ break;
+
+ ints = readl_relaxed(priv->base + RAW_PMU_INT);
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ hix5hd2_irq_enable(priv);
+ }
+
+ return work_done;
+}
+
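The poll routine above layers a re-check of RAW_PMU_INT on top of the standard NAPI contract: process at most budget packets, and re-enable the device interrupt only after napi_complete() has taken the context off the poll list. Stripped to the canonical skeleton, with hypothetical my_* helpers:

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static int my_process_rx(struct my_priv *priv, int budget);	/* hypothetical */
static void my_irq_enable(struct my_priv *priv);		/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_process_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);	/* leave polled mode first... */
		my_irq_enable(priv);	/* ...then re-arm the interrupt */
	}
	return work_done;	/* returning budget keeps us on the poll list */
}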
+static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ints = readl_relaxed(priv->base + RAW_PMU_INT);
+
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ if (likely(ints & DEF_INT_MASK)) {
+ hix5hd2_irq_disable(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 pos;
+
+ /* software write pointer */
+ pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
+ if (unlikely(priv->tx_skb[pos])) {
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ desc = priv->tx_bq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->tx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
+ (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
+ (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+
+ /* ensure desc updated */
+ wmb();
+
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->rx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->rx_fq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr,
+ MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ }
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->tx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->tx_rq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ }
+}
+
+static int hix5hd2_net_open(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable clk %d\n", ret);
+ return ret;
+ }
+
+ priv->phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+ if (!priv->phy) {
+ clk_disable_unprepare(priv->clk);
+ return -ENODEV;
+ }
+
+ phy_start(priv->phy);
+ hix5hd2_hw_init(priv);
+ hix5hd2_rx_refill(priv);
+
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ hix5hd2_port_enable(priv);
+ hix5hd2_irq_enable(priv);
+
+ return 0;
+}
+
+static int hix5hd2_net_close(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ hix5hd2_port_disable(priv);
+ hix5hd2_irq_disable(priv);
+ napi_disable(&priv->napi);
+ netif_stop_queue(dev);
+ hix5hd2_free_dma_desc_rings(priv);
+
+ if (priv->phy) {
+ phy_stop(priv->phy);
+ phy_disconnect(priv->phy);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void hix5hd2_tx_timeout_task(struct work_struct *work)
+{
+ struct hix5hd2_priv *priv;
+
+ priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
+ hix5hd2_net_close(priv->netdev);
+ hix5hd2_net_open(priv->netdev);
+}
+
+static void hix5hd2_net_timeout(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static const struct net_device_ops hix5hd2_netdev_ops = {
+ .ndo_open = hix5hd2_net_open,
+ .ndo_stop = hix5hd2_net_close,
+ .ndo_start_xmit = hix5hd2_net_xmit,
+ .ndo_tx_timeout = hix5hd2_net_timeout,
+ .ndo_set_mac_address = hix5hd2_net_set_mac_address,
+};
+
+static int hix5hd2_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hix5hd2_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phy, cmd);
+}
+
+static int hix5hd2_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hix5hd2_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phy, cmd);
+}
+
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = hix5hd2_get_settings,
+ .set_settings = hix5hd2_set_settings,
+};
+
+static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int i, timeout = 10000;
+
+ for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
+ if (i == timeout)
+ return -ETIMEDOUT;
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
+static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int val, ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(base + MDIO_RDATA_STATUS);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
+ ret = (val >> 16) & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(val, base + MDIO_SINGLE_DATA);
+ writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+out:
+ return ret;
+}
+
+static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ if (priv->pool[i].desc) {
+ dma_free_coherent(priv->dev, priv->pool[i].size,
+ priv->pool[i].desc,
+ priv->pool[i].phys_addr);
+ priv->pool[i].desc = NULL;
+ }
+ }
+}
+
+static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct hix5hd2_desc *virt_addr;
+ dma_addr_t phys_addr;
+ int size, i;
+
+ priv->rx_fq.count = RX_DESC_NUM;
+ priv->rx_bq.count = RX_DESC_NUM;
+ priv->tx_bq.count = TX_DESC_NUM;
+ priv->tx_rq.count = TX_DESC_NUM;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
+ if (virt_addr == NULL)
+ goto error_free_pool;
+
+ memset(virt_addr, 0, size);
+ priv->pool[i].size = size;
+ priv->pool[i].desc = virt_addr;
+ priv->pool[i].phys_addr = phys_addr;
+ }
+ return 0;
+
+error_free_pool:
+ hix5hd2_destroy_hw_desc_queue(priv);
+
+ return -ENOMEM;
+}
+
+static int hix5hd2_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct net_device *ndev;
+ struct hix5hd2_priv *priv;
+ struct resource *res;
+ struct mii_bus *bus;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_free_netdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ctrl_base)) {
+ ret = PTR_ERR(priv->ctrl_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ netdev_err(ndev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ bus = mdiobus_alloc();
+ if (bus == NULL) {
+ ret = -ENOMEM;
+ goto out_free_netdev;
+ }
+
+ bus->priv = priv;
+ bus->name = "hix5hd2_mii_bus";
+ bus->read = hix5hd2_mdio_read;
+ bus->write = hix5hd2_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ priv->bus = bus;
+
+ ret = of_mdiobus_register(bus, node);
+ if (ret)
+ goto err_free_mdio;
+
+ priv->phy_mode = of_get_phy_mode(node);
+ if (priv->phy_mode < 0) {
+ netdev_err(ndev, "not find phy-mode\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ netdev_err(ndev, "no phy-handle found\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = -EINVAL;
+ goto out_phy_node;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto out_phy_node;
+ }
+
+ mac_addr = of_get_mac_address(node);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ netdev_warn(ndev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hix5hd2_netdev_ops;
+ ndev->ethtool_ops = &hix5hd2_ethtools_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ ret = hix5hd2_init_hw_desc_queue(priv);
+ if (ret)
+ goto out_phy_node;
+
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ ret = register_netdev(priv->netdev);
+ if (ret) {
+ netdev_err(ndev, "register_netdev failed!");
+ goto out_destroy_queue;
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+
+out_destroy_queue:
+ netif_napi_del(&priv->napi);
+ hix5hd2_destroy_hw_desc_queue(priv);
+out_phy_node:
+ of_node_put(priv->phy_node);
+err_mdiobus:
+ mdiobus_unregister(bus);
+err_free_mdio:
+ mdiobus_free(bus);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hix5hd2_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hix5hd2_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+ mdiobus_unregister(priv->bus);
+ mdiobus_free(priv->bus);
+
+ hix5hd2_destroy_hw_desc_queue(priv);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hix5hd2_of_match[] = {
+ {.compatible = "hisilicon,hix5hd2-gmac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
+
+static struct platform_driver hix5hd2_dev_driver = {
+ .driver = {
+ .name = "hix5hd2-gmac",
+ .of_match_table = hix5hd2_of_match,
+ },
+ .probe = hix5hd2_dev_probe,
+ .remove = hix5hd2_dev_remove,
+};
+
+module_platform_driver(hix5hd2_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hix5hd2-gmac");
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 95837b99a464..85a3866459cf 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -63,8 +63,8 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- speed = ~0;
- cmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ cmd->duplex = DUPLEX_UNKNOWN;
}
ethtool_cmd_speed_set(cmd, speed);
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
void ehea_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+ netdev->ethtool_ops = &ehea_ethtool_ops;
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 538903bf13bc..a0b418e007a0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
return -EINVAL;
}
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
ret = -ENOMEM;
dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
out_free_ad:
list_del(&adapter->list);
- kfree(adapter);
out:
ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
list_del(&adapter->list);
- kfree(adapter);
ehea_update_firmware_handles();
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 9b03033bb557..a0820f72b25c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -103,12 +103,14 @@ out_nomem:
static void hw_queue_dtor(struct hw_queue *queue)
{
- int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+ int pages_per_kpage;
int i, nr_pages;
if (!queue || !queue->queue_pages)
return;
+ pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
nr_pages = queue->queue_length / queue->pagesize;
for (i = 0; i < nr_pages; i += pages_per_kpage)
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ae342fdb42c8..87bd953cc2ee 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
dev->commac.ops = &emac_commac_sg_ops;
} else
ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+ ndev->ethtool_ops = &emac_ethtool_ops;
netif_carrier_off(ndev);
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 25045ae07171..5727779a7df2 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
*/
dev->netdev_ops = &ipg_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
- SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+ dev->ethtool_ops = &ipg_ethtool_ops;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index b56461ce674c..9d979d7debef 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXALL;
netdev->netdev_ops = &e100_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+ netdev->ethtool_ops = &e100_ethtool_ops;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aeefb92a..d50f78afb56d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
- } while (good_cnt < 64 && jiffies < (time + 20));
+ } while (good_cnt < 64 && time_after(time + 20, jiffies));
+
if (good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
break;
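
The switch to time_after() matters because jiffies wraps: the raw comparison jiffies < (time + 20) stays false for the whole wrap window, while time_after() compares via signed subtraction and tolerates it. A userspace illustration of the same trick on 32-bit counters:

#include <stdio.h>
#include <stdint.h>

/* same idea as the kernel's time_after(): wraparound-safe signed compare */
#define time_after(a, b) ((int32_t)((b) - (a)) < 0)

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* counter just before wrap */
	uint32_t deadline = now + 0x20;		/* wraps around to 0x10 */

	printf("naive: %d\n", now < deadline);		/* 0: wrongly expired */
	printf("time_after(deadline, now): %d\n",
	       time_after(deadline, now));		/* 1: still in future */
	return 0;
}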
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c1d3fdb296a0..e9b07ccc0eba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
- if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ if (is_broadcast_ether_addr(mac_addr))
/* Broadcast packet */
stats->bprc++;
- else if (*mac_addr & 0x01)
+ else if (is_multicast_ether_addr(mac_addr))
/* Multicast packet */
stats->mprc++;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 27058dfe418b..660971f304b2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3105,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
*/
tx_ring = adapter->tx_ring;
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a5f6b11d6992..08f22f348800 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
.setup_led = e1000e_setup_led_generic,
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index e0aa7f1efb08..218481e509f9 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
.config_collision_dist = e1000e_config_collision_dist_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c5464a89..7785240a0da1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
u32 tx_hwtstamp_timeouts;
/* Rx */
- bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
- int work_to_do) ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
- gfp_t gfp);
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
*/
#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time. Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value. Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
- s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
- while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
-
- return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
- if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- __ew32_prepare(hw);
-
- writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250bc1b99..815e26c6d34b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -159,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
+
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
reg + (offset << 2), val,
(test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 val;
+
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 6b3de5f39a97..72f5475c4b90 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*config_collision_dist)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8 *, u32);
+ int (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
+ u32 (*rar_get_count)(struct e1000_hw *);
};
/* When to use various PHY register access functions:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd4246d71..8894ab8ed6bd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -704,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
e1000_setup_copper_link_pch_lpt;
+ mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
}
/* Enable PCS Lock-loss workaround for ICH8 */
@@ -1334,6 +1336,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
+
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
u16 emi_addr;
@@ -1634,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1667,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
* contain the MAC address but RAR[1-6] are reserved for manageability (ME).
* Use SHRA[0-3] in place of those reserved for ME.
**/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -1689,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* RAR[1-6] are owned by manageability. Skip those and program the
@@ -1712,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL(index - 1)) == rar_low) &&
(er32(SHRAH(index - 1)) == rar_high))
- return;
+ return 0;
e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
(index - 1), er32(FWSM));
@@ -1720,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ * @hw: pointer to the HW structure
+ *
+ * Get the number of available receive registers that the Host can
+ * program. SHRA[0-10] are the shared receive address registers
+ * that are shared between the Host and manageability engine (ME).
+ * ME can reserve any number of addresses and the host needs to be
+ * able to tell how many available registers it has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+ u32 wlock_mac;
+ u32 num_entries;
+
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ switch (wlock_mac) {
+ case 0:
+ /* All SHRA[0..10] and RAR[0] available */
+ num_entries = hw->mac.rar_entry_count;
+ break;
+ case 1:
+ /* Only RAR[0] available */
+ num_entries = 1;
+ break;
+ default:
+ /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+ num_entries = wlock_mac + 1;
+ break;
+ }
+
+ return num_entries;
}
/**
@@ -1733,7 +1773,7 @@ out:
* contain the MAC address. SHRA[0-10] are the shared receive address
* registers that are shared between the Host and manageability engine (ME).
**/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
u32 wlock_mac;
@@ -1755,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1787,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
(er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
- return;
+ return 0;
}
}
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -4976,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations ich8_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index baa0a466d1d0..8c386f3a15eb 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
return 0;
}
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+ return hw->mac.rar_entry_count;
+}
+
/**
* e1000e_rar_set_generic - Set receive address register
* @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 4e81c2825b7a..0513d90cdeea 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386add04..201cc93f3625 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -124,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
};
/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time. Normally, this is handled in
+ * h/w by an arbiter, but on some parts there is a bug that acknowledges
+ * Host accesses later than it should, which could result in the register
+ * having an incorrect value. Work around this by checking the FWSM
+ * register, which has bit 24 set while ME is accessing MAC CSR
+ * registers; wait if it is set and try again a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+ s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+ while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+ udelay(50);
+
+ return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ __ew32_prepare(hw);
+
+ writel(val, hw->hw_addr + reg);
+}
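
__ew32_prepare() is a bounded busy-wait on FWSM bit 24. A self-contained
sketch of the same pattern with the register read faked in software; the
poll budget and the simulated release point are illustrative assumptions,
not the driver's values:

    #include <stdint.h>
    #include <stdio.h>

    #define BUSY_BIT  (1u << 24)  /* FWSM bit 24, PCIM2PCI in the driver */
    #define MAX_POLLS 2000        /* illustrative budget only */

    static uint32_t fake_fwsm = BUSY_BIT; /* pretend ME holds the bus */
    static int reads;

    static uint32_t read_fwsm(void)       /* stand-in for er32(FWSM) */
    {
        if (++reads > 3)                  /* ME "releases" after 3 polls */
            fake_fwsm &= ~BUSY_BIT;
        return fake_fwsm;
    }

    /* Same shape as __ew32_prepare(): bounded busy-wait, returning the
     * remaining budget (0 means we gave up and write anyway).
     */
    static int prepare_csr_write(void)
    {
        int i = MAX_POLLS;

        while ((read_fwsm() & BUSY_BIT) && --i)
            ;                             /* driver: udelay(50) */
        return i;
    }

    int main(void)
    {
        printf("budget left: %d of %d\n", prepare_csr_write(), MAX_POLLS);
        return 0;
    }
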
+
+/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
* @reginfo: pointer to the register info table
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
+
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+
rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
+
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
int i;
+
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
+
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
/* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
+
ew32(RXDCTL(0), rxdctl | 0x3);
}
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.rar_entry_count;
+ unsigned int rar_entries;
int count = 0;
+ rar_entries = hw->mac.ops.rar_get_count(hw);
+
/* save a rar entry for our hardware address */
rar_entries--;
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
+ int rval;
+
if (!rar_entries)
break;
- hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (rval < 0)
+ return -ENOMEM;
count++;
}
}
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- cycle_t systim;
+ cycle_t systim, systim_next;
/* latch SYSTIMH on read of SYSTIML */
systim = (cycle_t)er32(SYSTIML);
systim |= (cycle_t)er32(SYSTIMH) << 32;
+ if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
+ u64 incvalue, time_delta, rem, temp;
+ int i;
+
+ /* Errata for 82574/82583: possible bad bits read from
+ * SYSTIMH/L. Check that the time is incrementing at a
+ * reasonable rate and is a multiple of incvalue.
+ */
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ rem = do_div(temp, incvalue);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
+ (rem == 0))
+ break;
+ }
+ }
return systim;
}
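
The re-read loop accepts a SYSTIM sample only when the delta from the
previous read is both small and an exact multiple of the timer increment,
which filters out single flipped bits. A hedged arithmetic sketch of that
acceptance test; incvalue and epsilon below are made-up numbers, not the
driver's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Accept a SYSTIM delta only if it is a multiple of incvalue and
     * below some epsilon, mirroring the 82574/82583 re-read loop.
     */
    static bool systim_delta_ok(uint64_t delta, uint64_t incvalue,
                                uint64_t epsilon)
    {
        return delta < epsilon && (delta % incvalue) == 0;
    }

    int main(void)
    {
        uint64_t incvalue = 24;                            /* made up */

        printf("%d\n", systim_delta_ok(48, incvalue, 1024));      /* 1 */
        printf("%d\n", systim_delta_ok(50, incvalue, 1024));      /* 0: not a multiple */
        printf("%d\n", systim_delta_ok(1u << 20, incvalue, 1024)); /* 0: looks like a bit flip */
        return 0;
    }
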
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
e1000_get_phy_info(hw);
/* Enable EEE on 82579 after link up */
- if (hw->phy.type == e1000_phy_82579)
+ if (hw->phy.type >= e1000_phy_82579)
e1000_set_eee_pchlan(hw);
}
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
+
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
(adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl | E1000_RCTL_EN);
adapter->flags &= ~FLAG_RESTART_NOW;
}
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
!txb2b) {
u32 tarc0;
+
tarc0 = er32(TARC(0));
tarc0 &= ~SPEED_MODE_BIT;
ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
__be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ return false;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return true;
}
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
+
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
+
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f04bff..b1f212b7baf7 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3249c8..aa1923f7ebdd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
+
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
+
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
+
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc98bf30..b2005e13fb01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
(hw->phy.addr == 2) &&
!(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
u16 data2 = 0x7EFF;
+
ret_val = e1000_access_phy_debug_regs_hv(hw,
(1 << 6) | 0x3,
&data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b4393a6c..65985846345d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -97,10 +98,6 @@
#define STRINGIFY(foo) #foo
#define XSTRINGIFY(bar) STRINGIFY(bar)
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
#define I40E_RX_DESC(R, i) \
((ring_is_16byte_desc_enabled(R)) \
? (union i40e_32byte_rx_desc *) \
@@ -157,11 +154,23 @@ struct i40e_lump_tracking {
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 200
+enum i40e_fd_stat_idx {
+ I40E_FD_STAT_ATR,
+ I40E_FD_STAT_SB,
+ I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+
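
With two counters per PF, these macros interleave the statistic indices:
pf_id 0 gets indices 0 and 1, pf_id 3 gets 6 and 7, and so on. A small
compile-and-run restatement of that layout:

    #include <assert.h>

    enum fd_stat { FD_STAT_ATR, FD_STAT_SB, FD_STAT_PF_COUNT };

    #define FD_STAT_PF_IDX(pf_id)  ((pf_id) * FD_STAT_PF_COUNT)
    #define FD_ATR_STAT_IDX(pf_id) (FD_STAT_PF_IDX(pf_id) + FD_STAT_ATR)
    #define FD_SB_STAT_IDX(pf_id)  (FD_STAT_PF_IDX(pf_id) + FD_STAT_SB)

    int main(void)
    {
        assert(FD_ATR_STAT_IDX(0) == 0 && FD_SB_STAT_IDX(0) == 1);
        assert(FD_ATR_STAT_IDX(3) == 6 && FD_SB_STAT_IDX(3) == 7);
        return 0;
    }
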
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
u8 ip4_proto;
+ /* TX packet view of src and dst */
__be32 dst_ip[4];
__be32 src_ip[4];
__be16 src_port;
@@ -205,7 +214,6 @@ struct i40e_pf {
unsigned long state;
unsigned long link_check_timeout;
struct msix_entry *msix_entries;
- u16 num_msix_entries;
bool fc_autoneg_status;
u16 eeprom_version;
@@ -220,11 +228,14 @@ struct i40e_pf {
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
+ u16 fd_sb_cnt_idx;
+ u16 fd_atr_cnt_idx;
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -266,6 +277,7 @@ struct i40e_pf {
#ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
+#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -300,7 +312,6 @@ struct i40e_pf {
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct i40e_aqc_get_switch_config_data *sw_config;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
@@ -329,9 +340,7 @@ struct i40e_pf {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
- struct work_struct ptp_tx_work;
struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj;
@@ -420,6 +429,7 @@ struct i40e_vsi {
struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
+ bool irqs_ready;
u16 seid; /* HW index of this VSI (absolute index) */
u16 id; /* VSI number */
@@ -540,6 +550,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
}
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the pf struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902bf249b..7a027499fc57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
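
i40e_is_nvm_update_op() drives a simple busy handshake added later in this
file: nvm_busy is set when an NVM erase/update is sent, further NVM ops are
rejected while it is set, and the flag clears when the completion arrives
on the receive queue. A toy model of that protocol; names and return
values here are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    /* One NVM erase/update may be in flight; further NVM ops are
     * rejected until the receive queue delivers the completion.
     */
    struct aq_state { bool nvm_busy; };

    static int send_nvm_op(struct aq_state *aq)
    {
        if (aq->nvm_busy)
            return -1;          /* driver returns I40E_ERR_NVM */
        aq->nvm_busy = true;    /* set after the send succeeds */
        return 0;
    }

    static void complete_nvm_op(struct aq_state *aq)
    {
        aq->nvm_busy = false;   /* as in i40e_clean_arq_element() */
    }

    int main(void)
    {
        struct aq_state aq = { .nvm_busy = false };

        printf("first:  %d\n", send_nvm_op(&aq));   /* 0  */
        printf("second: %d\n", send_nvm_op(&aq));   /* -1 */
        complete_nvm_op(&aq);
        printf("third:  %d\n", send_nvm_op(&aq));   /* 0  */
        return 0;
    }
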
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -281,8 +291,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -291,6 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
@@ -299,7 +313,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
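
Both queue-config helpers now program the base registers and then read one
back, failing if the write did not stick. A generic sketch of that
write-then-verify idiom over a fake register file, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[4];        /* toy register file */

    static void wr(unsigned int r, uint32_t v) { regs[r] = v; }
    static uint32_t rd(unsigned int r)         { return regs[r]; }

    /* Program a base-address pair, then verify one register took the
     * value, as i40e_config_asq_regs()/i40e_config_arq_regs() now do.
     */
    static int config_queue(uint64_t base)
    {
        wr(0, (uint32_t)(base >> 32));  /* BAH */
        wr(1, (uint32_t)base);          /* BAL */

        if (rd(1) != (uint32_t)base)
            return -1;                  /* I40E_ERR_ADMIN_QUEUE_ERROR */
        return 0;
    }

    int main(void)
    {
        printf("config: %d\n", config_queue(0x12345678abcd0000ULL));
        return 0;
    }
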
/**
@@ -308,8 +329,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -318,6 +342,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
@@ -326,10 +351,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
@@ -377,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -434,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -577,14 +613,14 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -835,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -929,6 +974,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f7685a911..b1552fbc48a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a8f8da..15f289f2917f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -182,9 +183,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +205,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -224,13 +223,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -272,8 +273,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -341,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -1289,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1539,6 +1535,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1547,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1583,11 +1584,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1694,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1747,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1785,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc45c54..6e65f19dd6e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_SFP_X710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_KX_D:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
@@ -133,7 +131,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
}
/**
@@ -653,6 +655,36 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
+ * i40e_pre_tx_queue_cfg - pre Tx queue configuration
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles the hw requirement to indicate the intention to enable
+ * or disable the target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128)
+ reg_block = abs_queue_idx / 128;
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
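
The block and index math above is easy to check in isolation: 128 queues
per GLLAN_TXPRE_QDIS register block, with the absolute queue index placed
in the QINDX field. A sketch with stand-in mask and shift values, not the
real register layout:

    #include <assert.h>
    #include <stdint.h>

    #define QINDX_SHIFT 0       /* stand-ins, not the real */
    #define QINDX_MASK  0x7FFu  /* GLLAN_TXPRE_QDIS layout  */

    /* Mirror of the block/index math in i40e_pre_tx_queue_cfg():
     * 128 queues per register block, absolute index into the field.
     */
    static void pre_txq_cfg(uint32_t base_queue, uint32_t queue,
                            uint32_t *block, uint32_t *field)
    {
        uint32_t abs_queue_idx = base_queue + queue;

        *block = (abs_queue_idx >= 128) ? abs_queue_idx / 128 : 0;
        *field = (abs_queue_idx << QINDX_SHIFT) & QINDX_MASK;
    }

    int main(void)
    {
        uint32_t block, field;

        pre_txq_cfg(16, 300, &block, &field);
        assert(block == 2 && field == 316);  /* 316 / 128 == 2 */
        pre_txq_cfg(0, 5, &block, &field);
        assert(block == 0 && field == 5);
        return 0;
    }
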
+
+/**
* i40e_get_media_type - Gets media type
* @hw: pointer to the hardware structure
**/
@@ -699,7 +731,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 10
+#define I40E_PF_RESET_WAIT_COUNT 100
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -789,6 +821,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
u32 reg;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
@@ -907,6 +942,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
/* Admin command wrappers */
/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
* i40e_aq_set_link_restart_an
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
@@ -975,6 +1037,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -1021,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -1163,8 +1230,6 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -1203,8 +1268,6 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -1300,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
i40e_status status;
+ u16 len;
if (dv == NULL)
return I40E_ERR_PARAM;
@@ -1311,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
cmd->driver_subbuild_ver = dv->subbuild_version;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
return status;
}
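
The new length loop is effectively a bounded strnlen that also stops at
the first non-7-bit byte, so only clean ASCII is handed to firmware. A
standalone equivalent of that loop:

    #include <assert.h>
    #include <stddef.h>

    /* Length of 's' up to 'max' bytes, stopping early at NUL or at
     * the first non-7-bit byte, same loop as the driver-version hunk.
     */
    static size_t ascii_strnlen(const char *s, size_t max)
    {
        size_t len = 0;

        while (len < max && (unsigned char)s[len] < 0x80 && s[len])
            len++;
        return len;
    }

    int main(void)
    {
        assert(ascii_strnlen("i40e-1.0", 32) == 8);
        assert(ascii_strnlen("abc\xc3\xa9", 32) == 3); /* stops at 0xC3 */
        assert(ascii_strnlen("abcdef", 4) == 4);       /* bounded */
        return 0;
    }
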
@@ -1900,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
}
+ /* Software override ensuring FCoE is disabled in npar or mfp
+ * mode, because it is not supported in these modes.
+ */
+ if (p->npar_enable || p->mfp_mode_1)
+ p->fcoe = false;
+
/* additional HW specific goodies that might
* someday be HW version specific
*/
@@ -2094,8 +2171,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2253,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
* @seid: VSI seid
@@ -2405,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 pe_cntx_size, pe_filt_size;
- u32 fcoe_fmax, pe_fmax;
+ u32 fcoe_fmax;
u32 val;
/* Validate FCoE settings passed */
@@ -2480,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return I40E_ERR_INVALID_SIZE;
- /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
- val = rd32(hw, I40E_GLHMC_PEXFMAX);
- pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
- >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
- if (pe_filt_size + pe_cntx_size > pe_fmax)
- return I40E_ERR_INVALID_SIZE;
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103abfd0d..00bc0cdb3a03 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_ieee_app_priority_table *app)
{
int v, err;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
if (err)
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
struct net_device *dev = vsi->netdev;
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
- /* DCB not enabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ /* Not DCB capable */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;
/* Do not setup DCB NL ops for MFP mode */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386fd138..cffdfc21290f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
if (seid < 0)
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
else
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
return pf->vsi[i];
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
i, pf->vsi[i]->seid);
@@ -862,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
" rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
dev_info(&pf->pdev->dev,
- " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
- estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+ estats->rx_broadcast, estats->rx_discards);
dev_info(&pf->pdev->dev,
- " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
- estats->rx_missed, estats->rx_unknown_protocol,
- estats->tx_bytes);
+ " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_unknown_protocol, estats->tx_bytes);
dev_info(&pf->pdev->dev,
" tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -1527,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
@@ -1744,10 +1743,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
- } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1967,8 +1962,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd-atr off\n");
dev_info(&pf->pdev->dev, " fd-atr on\n");
- dev_info(&pf->pdev->dev, " fd-sb off\n");
- dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index b2380daef8c1..56438bd579e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -67,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
- {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
- {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
- {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
- {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1,
+ I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+ I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+ I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1,
+ I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1,
+ I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -93,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
u32 reg, mask;
u32 i, j;
- for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ !ret_code; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cbc5c25..4a488ffcd6b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -46,6 +46,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_veb, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -56,12 +58,36 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(tx_errors),
I40E_NETDEV_STAT(rx_dropped),
I40E_NETDEV_STAT(tx_dropped),
- I40E_NETDEV_STAT(multicast),
I40E_NETDEV_STAT(collisions),
I40E_NETDEV_STAT(rx_length_errors),
I40E_NETDEV_STAT(rx_crc_errors),
};
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+ I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+ I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+ I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+ I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+ I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+ I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+};
+
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct ethtool_rxnfc *cmd);
@@ -78,7 +104,12 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -88,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
I40E_PF_STAT("tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -112,8 +144,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
- I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+
/* LPI stats */
I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -122,11 +156,14 @@ static struct i40e_stats i40e_gstrings_stats[] = {
};
#define I40E_QUEUE_STATS_LEN(n) \
- ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
- ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+ (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+ * 2 /* Tx and Rx together */ \
+ * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -135,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
/ sizeof(u64))
+#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
@@ -620,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi])
- return I40E_PF_STATS_LEN(netdev);
- else
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ int len = I40E_PF_STATS_LEN(netdev);
+
+ if (pf->lan_veb != I40E_NO_VEB)
+ len += I40E_VEB_STATS_LEN;
+ return len;
+ } else {
return I40E_VSI_STATS_LEN(netdev);
+ }
default:
return -EOPNOTSUPP;
}
@@ -633,6 +676,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
int i = 0;
@@ -648,10 +692,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
+ p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
- struct i40e_ring *rx_ring;
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
@@ -662,33 +710,45 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ i += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i + 2] = rx_ring->stats.packets;
- data[i + 3] = rx_ring->stats.bytes;
+ data[i] = rx_ring->stats.packets;
+ data[i + 1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ i += 2;
}
rcu_read_unlock();
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_tx[j];
- data[i++] = pf->stats.priority_xoff_tx[j];
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_rx[j];
- data[i++] = pf->stats.priority_xoff_rx[j];
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+ p = (char *)veb;
+ p += i40e_gstrings_veb_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
- data[i++] = pf->stats.priority_xon_2_xoff[j];
}
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -713,6 +773,11 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_net_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_misc_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -723,34 +788,42 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
- i40e_gstrings_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "veb.%s",
+ i40e_gstrings_veb_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
@@ -1007,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
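The get_coalesce path above and the set_coalesce hunk below share one encoding: the adaptive ("dynamic") flag is a bit OR'd into the same setting word as the interval in usecs, so the getter masks it off for reporting while the setter toggles it independently of the range-checked value. A sketch under that assumption (DEMO_ITR_DYNAMIC stands in for I40E_ITR_DYNAMIC; the bit value here is illustrative):

#define DEMO_ITR_DYNAMIC 0x8000	/* illustrative; see I40E_ITR_DYNAMIC */

static u16 demo_itr_usecs(u16 setting)
{
	return setting & ~DEMO_ITR_DYNAMIC;	/* interval without the flag */
}

static u16 demo_itr_set_adaptive(u16 setting, bool adaptive)
{
	return adaptive ? (setting | DEMO_ITR_DYNAMIC)
			: (setting & ~DEMO_ITR_DYNAMIC);
}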
@@ -1033,37 +1105,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
- break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
- break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- break;
- default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
vector = vsi->base_vector;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
@@ -1140,8 +1202,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
int cnt = 0;
/* report total rule count */
- cmd->data = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
+ cmd->data = i40e_get_fd_cnt_all(pf);
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
@@ -1175,10 +1236,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
- /* report total rule count */
- cmd->data = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
if (fsp->location <= rule->fd_id)
@@ -1189,11 +1246,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
return -EINVAL;
fsp->flow_type = rule->flow_type;
- fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
- fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
- fsp->ring_cookie = rule->q_index;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
+	/* Reverse the src and dest notion, since the HW views them from the
+	 * Tx perspective, whereas the user expects them from the Rx filter
+	 * view.
+	 */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+
+ if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
return 0;
}
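The swap in this getter is deliberately symmetric with the add path later in the file: the hardware stores Flow Director keys from the Tx viewpoint, so the driver crosses src/dst on the way in and again on the way out, leaving the user with consistent Rx-filter semantics. A hedged sketch with a hypothetical flow struct:

struct demo_flow { __be32 src_ip, dst_ip; __be16 src_port, dst_port; };

/* One helper serves both directions because the mapping is its own
 * inverse: applying it twice restores the original orientation.
 */
static void demo_swap_perspective(const struct demo_flow *in,
				  struct demo_flow *out)
{
	out->src_ip = in->dst_ip;
	out->dst_ip = in->src_ip;
	out->src_port = in->dst_port;
	out->dst_port = in->src_port;
}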
@@ -1223,6 +1293,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
@@ -1291,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -1309,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -1503,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -EINVAL;
}
- if (fsp->ring_cookie >= vsi->num_queue_pairs)
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= vsi->num_queue_pairs))
return -EINVAL;
input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1524,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = 0;
+ input->cnt_index = pf->fd_sb_cnt_idx;
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
- input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
- input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+	/* Reverse the src and dest notion, since the HW expects them from the
+	 * Tx perspective, whereas the user input is from the Rx filter view.
+	 */
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
ret = i40e_add_del_fdir(vsi, input, true);
if (ret)
@@ -1692,5 +1761,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+ netdev->ethtool_ops = &i40e_ethtool_ops;
}
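The last hunk tracks the tree-wide removal of SET_ETHTOOL_OPS(); as best I recall, the macro was never more than shorthand for the assignment, roughly:

#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))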
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index bf2d4cc5b569..9b987ccc9e82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -201,7 +201,7 @@ exit:
**/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+ u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- if (is_pf)
- I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
- else
- I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 0cd4701234f8..b45d8fedc5e7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe2691d..870ab1ee072c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* remove the backing pages from pd_idx1 to i */
while (i && (i > pd_idx1)) {
i40e_remove_pd_bp(hw, info->hmc_info,
- (i - 1), true);
+ (i - 1));
i--;
}
}
@@ -433,11 +433,7 @@ exit_sd_error:
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++) {
- i40e_remove_pd_bp(
- hw,
- info->hmc_info,
- i,
- true);
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
}
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
pd_table =
&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry[rel_pd_idx].valid) {
- ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
- j, true);
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
if (ret_code)
goto exit;
}
@@ -747,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
{ 0 }
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de925a298..eb65fe23c4a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e72449f1265..275ca9a1719e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -356,6 +354,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
@@ -368,7 +367,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring, *rx_ring;
u64 bytes, packets;
unsigned int start;
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
}
rcu_read_unlock();
- /* following stats updated by ixgbe_watchdog_task() */
+ /* following stats updated by i40e_watchdog_subtask() */
stats->multicast = vsi_stats->multicast;
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
@@ -530,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_discards, &es->rx_discards);
+ i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
I40E_GLV_GORCL(stat_idx),
@@ -648,10 +652,10 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
return;
/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -702,10 +706,10 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -720,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/**
- * i40e_update_stats - Update the board statistics counters.
+ * i40e_update_vsi_stats - Update the VSI statistics counters.
* @vsi: the VSI to be updated
*
* There are a few instances where we store the same stat in a
* couple of different structs. This is partly because we have
* the netdev stats that need to be filled out, which is slightly
* different from the "eth_stats" defined by the chip and used in
- * VF communications. We sort it all out here in a central place.
+ * VF communications. We sort it out here.
**/
-void i40e_update_stats(struct i40e_vsi *vsi)
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
@@ -741,8 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
- u32 val;
- int i;
u16 q;
if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -804,196 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
ns->tx_packets = tx_p;
ns->tx_bytes = tx_b;
- i40e_update_eth_stats(vsi);
/* update netdev stats from eth stats */
- ons->rx_errors = oes->rx_errors;
- ns->rx_errors = es->rx_errors;
+ i40e_update_eth_stats(vsi);
ons->tx_errors = oes->tx_errors;
ns->tx_errors = es->tx_errors;
ons->multicast = oes->rx_multicast;
ns->multicast = es->rx_multicast;
+ ons->rx_dropped = oes->rx_discards;
+ ns->rx_dropped = es->rx_discards;
ons->tx_dropped = oes->tx_discards;
ns->tx_dropped = es->tx_discards;
- /* Get the port data only if this is the main PF VSI */
+ /* pull in a couple PF stats if this is the main vsi */
if (vsi == pf->vsi[pf->lan_vsi]) {
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ ns->rx_crc_errors = pf->stats.crc_errors;
+ ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+ ns->rx_length_errors = pf->stats.rx_length_errors;
+ }
+}
- i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_discards,
- &nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
- i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_multicast,
- &nsd->eth.rx_multicast);
+/**
+ * i40e_update_pf_stats - Update the PF statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+ int i;
- i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_dropped_link_down,
- &nsd->tx_dropped_link_down);
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
- pf->stat_offsets_loaded,
- &osd->crc_errors, &nsd->crc_errors);
- ns->rx_crc_errors = nsd->crc_errors;
+ i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
- i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
- pf->stat_offsets_loaded,
- &osd->illegal_bytes, &nsd->illegal_bytes);
- ns->rx_errors = nsd->crc_errors
- + nsd->illegal_bytes;
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
- i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_local_faults,
- &nsd->mac_local_faults);
- i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_remote_faults,
- &nsd->mac_remote_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
- i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_length_errors,
- &nsd->rx_length_errors);
- ns->rx_length_errors = nsd->rx_length_errors;
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_rx, &nsd->link_xon_rx);
- i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_tx, &nsd->link_xon_tx);
- i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
- i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
- for (i = 0; i < 8; i++) {
- i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_rx[i],
- &nsd->priority_xon_rx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_tx[i],
- &nsd->priority_xon_tx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xoff_tx[i],
- &nsd->priority_xoff_tx[i]);
- i40e_stat_update32(hw,
- I40E_GLPRT_RXON2OFFCNT(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_2_xoff[i],
- &nsd->priority_xon_2_xoff[i]);
- }
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
- i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_64, &nsd->rx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_127, &nsd->rx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_255, &nsd->rx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_511, &nsd->rx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1023, &nsd->rx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1522, &nsd->rx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_big, &nsd->rx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
- i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_64, &nsd->tx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_127, &nsd->tx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_255, &nsd->tx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_511, &nsd->tx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1023, &nsd->tx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1522, &nsd->tx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_big, &nsd->tx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
- i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_undersize, &nsd->rx_undersize);
- i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_fragments, &nsd->rx_fragments);
- i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
- i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_jabber, &nsd->rx_jabber);
-
- val = rd32(hw, I40E_PRTPM_EEE_STAT);
- nsd->tx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
- nsd->rx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
- i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
pf->stat_offsets_loaded,
- &osd->tx_lpi_count, &nsd->tx_lpi_count);
- i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
- pf->stat_offsets_loaded,
- &osd->rx_lpi_count, &nsd->rx_lpi_count);
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
}
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+
+ /* FDIR stats */
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_match, &nsd->fd_atr_match);
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_sb_match, &nsd->fd_sb_match);
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
pf->stat_offsets_loaded = true;
}
/**
+ * i40e_update_stats - Update the various statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * Update the various stats for this VSI and its related entities.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_update_pf_stats(pf);
+
+ i40e_update_vsi_stats(vsi);
+}
+
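Every i40e_stat_update32/48 call above follows one idiom: the hardware counters are free-running and never cleared, so the first read after load is latched as a baseline and each report is a delta, with the 48-bit counters widened across wrap. A paraphrased sketch (mirrors the spirit of i40e_stat_update48, not its exact two-register read; needs <linux/bitops.h> for BIT_ULL):

static void demo_stat_update48(u64 new_data, bool offset_loaded,
			       u64 *offset, u64 *stat)
{
	if (!offset_loaded)
		*offset = new_data;	/* first read becomes the baseline */
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else				/* the 48-bit counter wrapped */
		*stat = (new_data + BIT_ULL(48)) - *offset;
}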
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1101,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ struct i40e_pf *pf = vsi->back;
+ i40e_status aq_ret;
+
+ /* Only appropriate for the PF main VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (aq_ret)
+ dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+}
+
+/**
* i40e_add_filter - Add a mac/vlan filter to the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1125,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!f)
goto add_filter_out;
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
f->changed = true;
@@ -1249,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
}
- memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
}
/* In order to be sure to not drop any packets, add the new address
@@ -1263,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
i40e_sync_vsi_filters(vsi);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
}
@@ -1313,7 +1398,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = vsi->alloc_queue_pairs/numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -1520,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to delete list */
- memcpy(del_list[num_del].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1542,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status !=
+ I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
aq_ret,
@@ -1554,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list, num_del, NULL);
num_del = 0;
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
@@ -1583,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to add array */
- memcpy(add_list[num_add].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
add_list[num_add].vlan_tag =
cpu_to_le16(
(u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1681,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
pf->flags &= ~I40E_FLAG_FILTER_SYNC;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1698,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_vsi *vsi = np->vsi;
/* MTU < 68 is an error and causes problems on some kernels */
@@ -2312,6 +2398,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
+	/* set the prefena field to 1 as required by the hardware manual */
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2413,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
**/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
u16 qoffset, qcount;
int i, n;
@@ -2426,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = vsi->rx_rings[i];
- struct i40e_ring *tx_ring = vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2565,7 +2654,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
- I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2733,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
&q_vector->affinity_mask);
}
+ vsi->irqs_ready = true;
return 0;
free_queue_irqs:
@@ -3152,6 +3241,12 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ udelay(10);
+
for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
@@ -3160,9 +3255,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
/* Skip if the queue is already in the requested state */
- if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
- if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
/* turn on/off the queue */
@@ -3178,13 +3271,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (enable) {
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
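The collapsed checks in this hunk work because !! normalizes the masked status bit to exactly 0 or 1, which makes it directly comparable with the bool enable; a single predicate now covers both polarities:

/* true when the queue is already in the requested state */
static bool demo_queue_state_matches(u32 reg, u32 stat_mask, bool enable)
{
	return enable == !!(reg & stat_mask);
}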
@@ -3223,15 +3311,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
- if (enable) {
- /* is STAT set ? */
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- } else {
- /* is !STAT set ? */
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
if (enable)
@@ -3244,13 +3326,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
for (j = 0; j < 10; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) {
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3304,6 +3381,10 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
if (!vsi->q_vectors)
return;
+ if (!vsi->irqs_ready)
+ return;
+
+ vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
@@ -3476,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
i40e_vsi_free_q_vectors(pf->vsi[i]);
i40e_reset_interrupt_capability(pf);
@@ -3513,6 +3594,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the VSI to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+}
+
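i40e_vsi_close() becomes the single teardown path: i40e_close() and i40e_quiesce_vsi() below both funnel through it, and test_and_set_bit() makes the DOWN transition idempotent. A sketch of why a repeated call is expected to be harmless (behavior inferred from this patch's guards):

i40e_vsi_close(vsi);	/* sets DOWN, stops the VSI, frees IRQs/rings */
i40e_vsi_close(vsi);	/* DOWN already set: i40e_down() is skipped and
			 * the frees are guarded (e.g. vsi->irqs_ready)
			 */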
+/**
* i40e_quiesce_vsi - Pause a given VSI
* @vsi: the VSI being paused
**/
@@ -3525,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
- set_bit(__I40E_DOWN, &vsi->state);
- i40e_down(vsi);
+ i40e_vsi_close(vsi);
}
}
@@ -3543,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
- i40e_up(vsi); /* this clears the DOWN bit */
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
/**
@@ -3554,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_quiesce_vsi(pf->vsi[v]);
}
@@ -3568,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_unquiesce_vsi(pf->vsi[v]);
}
@@ -4009,7 +4102,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
}
/* Update each VSI */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v])
continue;
@@ -4028,6 +4121,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
pf->vsi[v]->seid);
/* Will try to configure as many components */
} else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
@@ -4065,14 +4160,69 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* When status is not DISABLED then DCBX in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
}
+ } else {
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ pf->hw.aq.asq_last_status);
}
out:
return err;
}
#endif /* CONFIG_I40E_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ * @isup: true if the link is up
+ */
+static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+ char speed[SPEED_SIZE] = "Unknown";
+ char fc[FC_SIZE] = "RX/TX";
+
+ if (!isup) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (vsi->back->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ strncpy(speed, "40 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ strncpy(speed, "10 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ strncpy(speed, "1000 Mbps", SPEED_SIZE);
+ break;
+ default:
+ break;
+ }
+
+ switch (vsi->back->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ strncpy(fc, "RX/TX", FC_SIZE);
+ break;
+ case I40E_FC_TX_PAUSE:
+ strncpy(fc, "TX", FC_SIZE);
+ break;
+ case I40E_FC_RX_PAUSE:
+ strncpy(fc, "RX", FC_SIZE);
+ break;
+ default:
+ strncpy(fc, "None", FC_SIZE);
+ break;
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
/**
* i40e_up_complete - Finish the last steps of bringing up a connection
@@ -4099,11 +4249,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
- netdev_info(vsi->netdev, "NIC Link is Up\n");
+ i40e_print_link_message(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
} else if (vsi->netdev) {
- netdev_info(vsi->netdev, "NIC Link is Down\n");
+ i40e_print_link_message(vsi, false);
}
/* replay FDIR SB filters */
@@ -4309,24 +4459,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
if (err)
goto err_setup_rx;
- if (!vsi->netdev) {
- err = EINVAL;
- goto err_setup_rx;
- }
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
+ err = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ } else {
+ err = -EINVAL;
+ goto err_setup_rx;
+ }
err = i40e_up_complete(vsi);
if (err)
@@ -4383,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state))
- return 0;
-
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
return 0;
}
@@ -4410,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
+
/* do the biggest reset indicated */
if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
@@ -4475,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* Find the VSI(s) that requested a re-init */
dev_info(&pf->pdev->dev,
"VSI reinit requested\n");
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4565,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
int ret = 0;
u8 type;
+ /* Not DCB capable or capability disabled */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return ret;
+
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4606,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (!need_reconfig)
goto exit;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
/* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
@@ -4709,8 +4873,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -4803,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
i40e_veb_link_event(pf->veb[i], link_up);
/* ... now the local VSIs */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
i40e_vsi_link_event(pf->vsi[i], link_up);
}
@@ -4821,10 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
-
if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4862,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
* for each q_vector
* force an interrupt
*/
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
int armed = 0;
@@ -4912,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
@@ -5018,11 +5179,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u16 pending, i = 0;
i40e_status ret;
u16 opcode;
+ u32 oldval;
u32 val;
if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
return;
+ /* check for error indications */
+ val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ oldval = val;
+ if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+ val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ oldval = val;
+ if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
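Both new blocks use the same read-log-clear-write pattern for the latched AdminQ error bits, writing back only when a bit was actually cleared so the common path costs a single register read. Schematically (register and mask names here are placeholders):

u32 val = rd32(hw, reg);
u32 oldval = val;

if (val & DEMO_ERR_MASK) {
	dev_info(dev, "error detected\n");
	val &= ~DEMO_ERR_MASK;	/* clear the latched bit in the local copy */
}
if (oldval != val)		/* write back only if something changed */
	wr32(hw, reg, val);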
event.msg_size = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf)
@@ -5128,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
int ret;
/* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
if (pf->vsi[v] &&
pf->vsi[v]->veb_idx == veb->idx &&
pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5158,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
goto end_reconstitute;
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
continue;
@@ -5226,9 +5423,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- /* increment MSI-X count because current FW skips one */
- pf->hw.func_caps.num_msix_vectors++;
-
if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
(pf->hw.aq.fw_maj_ver < 2)) {
pf->hw.func_caps.num_msix_vectors++;
@@ -5267,15 +5461,14 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- bool new_vsi = false;
- int err, i;
+ int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
/* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
break;
@@ -5288,47 +5481,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- goto err_vsi;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ return;
}
- new_vsi = true;
- }
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
-
- err = i40e_vsi_setup_tx_resources(vsi);
- if (err)
- goto err_setup_tx;
- err = i40e_vsi_setup_rx_resources(vsi);
- if (err)
- goto err_setup_rx;
-
- if (new_vsi) {
- char int_name[IFNAMSIZ + 9];
- err = i40e_vsi_configure(vsi);
- if (err)
- goto err_setup_rx;
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
- err = i40e_up_complete(vsi);
- if (err)
- goto err_up_complete;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
}
- return;
-
-err_up_complete:
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-err_setup_rx:
- i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
- i40e_vsi_free_tx_resources(vsi);
-err_vsi:
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- i40e_vsi_clear(vsi);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
@@ -5340,7 +5498,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
int i;
i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
break;
@@ -5357,7 +5515,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
static int i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ i40e_status ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
@@ -5366,13 +5524,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- if (i40e_check_asq_alive(hw))
- i40e_vc_notify_reset(pf);
-
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
pf->vsi[v]->seid = 0;
}
@@ -5380,22 +5535,40 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
/* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ if (hw->hmc.hmc_obj) {
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_warn(&pf->pdev->dev,
+ "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
}
return ret;
}
/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
* i40e_reset_and_rebuild - reset and rebuild using a saved config
* @pf: board private structure
* @reinit: if the Main VSI needs to re-initialized.
**/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
@@ -5405,8 +5578,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
* because the reset will make them disappear.
*/
ret = i40e_pf_reset(hw);
- if (ret)
+ if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ goto end_core_reset;
+ }
pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state))
@@ -5426,6 +5601,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_verify_eeprom(pf);
}
+ i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
if (ret) {
dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5526,13 +5702,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
-
- dev_info(&pf->pdev->dev, "reset complete\n");
+ i40e_send_version(pf);
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5642,7 +5812,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
- const int vxlan_hdr_qwords = 4;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u8 filter_index;
@@ -5660,7 +5829,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
port = pf->vxlan_ports[i];
ret = port ?
i40e_aq_add_udp_tunnel(hw, ntohs(port),
- vxlan_hdr_qwords,
I40E_AQC_TUNNEL_TYPE_VXLAN,
&filter_index, NULL)
: i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5839,15 +6007,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
* find next empty vsi slot, looping back around if necessary
*/
i = pf->next_vsi;
- while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ while (i < pf->num_alloc_vsi && pf->vsi[i])
i++;
- if (i >= pf->hw.func_caps.num_vsis) {
+ if (i >= pf->num_alloc_vsi) {
i = 0;
while (i < pf->next_vsi && pf->vsi[i])
i++;
}
- if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
@@ -5870,6 +6038,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
+ vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
@@ -5987,14 +6156,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_pf *pf = vsi->back;
int i;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *tx_ring;
- struct i40e_ring *rx_ring;
-
/* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
@@ -6052,8 +6219,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
vectors = 0;
}
- pf->num_msix_entries = vectors;
-
return vectors;
}
@@ -6107,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
vec = i40e_reserve_msix_vectors(pf, v_budget);
+
+ if (vec != v_budget) {
+ /* If we have limited resources, start with no vectors for the
+ * special features, then allocate vectors to some of them based
+ * on the policy, and finally disable any feature that did not
+ * get a vector.
+ */
+ pf->num_vmdq_msix = 0;
+ }
+
if (vec < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -6115,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
- pf->num_vmdq_msix = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
} else if (vec != v_budget) {
+ /* reserve the misc vector */
+ vec--;
+
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
- vec--; /* reserve the misc vector */
+ pf->num_vmdq_vsis = 1;
/* partition out the remaining vectors */
switch (vec) {
case 2:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 1;
break;
case 3:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 2;
break;
default:
@@ -6147,6 +6320,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ (pf->num_vmdq_msix == 0)) {
+ dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ }
return err;
}
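
To make the degraded-allocation arithmetic concrete: after the misc vector is reserved and VMDq is pinned to one vector, a grant of vec == 3 leaves 2 vectors (1 LAN + 1 VMDq) and vec == 4 leaves 3 (2 LAN + 1 VMDq). A minimal sketch of the partitioning, assuming the default branch simply gives the remainder to LAN (illustrative only, not the driver's exact code path):

	static void example_partition(int vec, int *lan_msix, int *vmdq_msix)
	{
		vec--;			/* reserve the misc vector */
		*vmdq_msix = 1;		/* force VMDq to one vector */
		*lan_msix = (vec == 2) ? 1 :
			    (vec == 3) ? 2 : vec - *vmdq_msix;
	}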
@@ -6171,7 +6349,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, vsi->work_limit);
+ i40e_napi_poll, NAPI_POLL_WEIGHT);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -6231,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (err) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -6364,7 +6542,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
return 0;
queue_count = min_t(int, queue_count, pf->rss_size_max);
- queue_count = rounddown_pow_of_two(queue_count);
if (queue_count != pf->rss_size) {
i40e_prep_for_reset(pf);
@@ -6407,6 +6584,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Set default ITR */
+ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
@@ -6416,7 +6597,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
- pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
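
With the power-of-two rounding dropped here (and in i40e_reconfig_rss_queues() above), rss_size now follows the CPU count directly: a 12-core host gets 12 RSS queues, bounded only by rss_size_max, where it previously would have been rounded down to 8.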
@@ -6432,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ /* Setup a counter for fd_atr per pf */
+ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* Setup a counter for fd_sb per pf */
+ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6649,6 +6833,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
}
#endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses, so if an
+ * ndm_state is given, only allow permanent addresses.
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -6669,13 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
- .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
+ .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = i40e_ndo_fdb_del,
+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
+#endif
+#endif
};
/**
@@ -6720,16 +7002,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
- NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ netdev->features |= NETIF_F_NTUPLE;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
- memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ ether_addr_copy(mac_addr, hw->mac.perm_addr);
+ /* The following two steps are necessary to prevent reception
+ * of tagged packets: by default the NVM loads a MAC-VLAN
+ * filter that will accept any tagged packet, and it is
+ * removed here so that tagged packets are not received
+ * during normal operation until a specific VLAN tag filter
+ * has been set.
+ */
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6739,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
}
i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
/* vlan gets same features (except vlan offload)
* after any tweaks for specific VSI types
*/
@@ -6772,7 +7064,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
return;
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
- return;
}
/**
@@ -6898,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.info.sec_flags |=
+ (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -6982,11 +7280,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
unregister_netdev(vsi->netdev);
}
} else {
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
}
i40e_vsi_disable_irq(vsi);
}
@@ -7013,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
* the orphan VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] &&
pf->vsi[i]->uplink_seid == uplink_seid &&
(pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7192,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (!veb && uplink_seid != pf->mac_seid) {
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
vsi = pf->vsi[i];
break;
@@ -7435,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7487,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
n++;
vsi = pf->vsi[i];
@@ -7516,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
i40e_veb_clear(veb);
-
- return;
}
/**
@@ -7601,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
break;
- if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
vsi_seid);
return NULL;
@@ -7639,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
if (ret)
goto err_veb;
+ if (vsi_idx == pf->lan_vsi)
+ pf->lan_veb = veb->idx;
return veb;
@@ -7774,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
"header: %d reported %d total\n",
num_reported, num_total);
- if (num_reported) {
- int sz = sizeof(*sw_config) * num_reported;
-
- kfree(pf->sw_config);
- pf->sw_config = kzalloc(sz, GFP_KERNEL);
- if (pf->sw_config)
- memcpy(pf->sw_config, sw_config, sz);
- }
-
for (i = 0; i < num_reported; i++) {
struct i40e_aqc_switch_config_element_resp *ele =
&sw_config->element[i];
@@ -7949,9 +8234,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_DCB_ENABLED))) {
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
@@ -7960,14 +8243,27 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
+ } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE))) {
+ /* one qp for PF */
+ pf->rss_size = pf->num_lan_qps = 1;
+ queues_left -= pf->num_lan_qps;
+
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
} else {
/* Not enough queues for all TCs */
- if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = pf->rss_size_max;
@@ -7998,7 +8294,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
}
pf->queues_left = queues_left;
- return;
}
/**
@@ -8055,12 +8350,13 @@ static void i40e_print_features(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
- buf += sprintf(buf, "FDir ");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
- buf += sprintf(buf, "ATR ");
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+ buf += sprintf(buf, "FD_ATR ");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ buf += sprintf(buf, "FD_SB ");
buf += sprintf(buf, "NTUPLE ");
- if (pf->flags & I40E_FLAG_DCB_ENABLED)
+ }
+ if (pf->flags & I40E_FLAG_DCB_CAPABLE)
buf += sprintf(buf, "DCB ");
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
@@ -8083,13 +8379,13 @@ static void i40e_print_features(struct i40e_pf *pf)
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 link_status;
int err = 0;
u32 len;
+ u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -8201,6 +8497,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_verify_eeprom(pf);
+ /* Rev 0 hardware was never productized */
+ if (hw->revision_id < 1)
+ dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
@@ -8234,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mac_addr;
}
dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
- memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
@@ -8242,8 +8542,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- goto err_init_dcb;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -8264,10 +8564,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
- /* Set up the *vsi struct based on the number of VSIs in the HW,
- * and set up our local tracking of the MAIN PF vsi.
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
*/
- len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
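
The clamp above is an open-coded maximum; an equivalent one-liner using the kernel's max_t() helper would be (sketch, assuming int arithmetic suits both fields):

	pf->num_alloc_vsi = max_t(int, pf->hw.func_caps.num_vsis,
				  I40E_MIN_VSI_ALLOC);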
+ /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+ len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
pf->vsi = kzalloc(len, GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
@@ -8279,6 +8587,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -8300,6 +8615,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+#ifdef CONFIG_PCI_IOV
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8322,17 +8638,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err);
}
}
+#endif /* CONFIG_PCI_IOV */
pfs_found++;
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+ i40e_send_version(pf);
/* since everything's happy, start the service_task timer */
mod_timer(&pf->service_timer,
@@ -8373,9 +8686,6 @@ err_vsis:
err_switch_setup:
i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -8456,10 +8766,13 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown and destroy the HMC */
- ret_code = i40e_shutdown_lan_hmc(&pf->hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the HMC resources: %d\n", ret_code);
+ if (pf->hw.hmc.hmc_obj) {
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n",
+ ret_code);
+ }
/* shutdown the adminq */
ret_code = i40e_shutdown_adminq(&pf->hw);
@@ -8470,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
@@ -8485,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
kfree(pf->qp_pile);
kfree(pf->irq_pile);
- kfree(pf->sw_config);
kfree(pf->vsi);
/* force a PF reset to clean anything leftover */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e617959..a430699c41d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,10 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +169,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
@@ -216,6 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e63720800..101f439acda6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48,7 +48,6 @@
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PTP_TX_TIMEOUT (HZ * 15)
/**
* i40e_ptp_read - Read the PHC time from the device
@@ -217,40 +216,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
- struct i40e_pf *pf = container_of(work, struct i40e_pf,
- ptp_tx_work);
- struct i40e_hw *hw = &pf->hw;
- u32 prttsyn_stat_0;
-
- if (!pf->ptp_tx_skb)
- return;
-
- if (time_is_before_jiffies(pf->ptp_tx_start +
- I40E_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
- pf->ptp_tx_skb = NULL;
- pf->tx_hwtstamp_timeouts++;
- dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
- return;
- }
-
- prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
- i40e_ptp_tx_hwtstamp(pf);
- else
- schedule_work(&pf->ptp_tx_work);
-}
-
-/**
* i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
* @ptp: The PTP clock structure
* @rq: The requested feature to change
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
u32 regval;
spin_lock_init(&pf->tmreg_lock);
- INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_tx = false;
pf->ptp_rx = false;
- cancel_work_sync(&pf->ptp_tx_work);
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1d40f425acf1..947de98500f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1340,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1367,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1589,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..e49f31dbd5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
*
******************************************************************************/
+#include <linux/prefetch.h>
#include "i40e.h"
#include "i40e_prototype.h"
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* find existing FDIR VSI */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
dcc |= ((u32)fdir_data->cnt_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
- int i;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
ip->saddr = fd_data->src_ip[0];
udp->source = fd_data->src_port;
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
}
return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
}
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
/* Turn off ATR first */
- if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
- } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) {
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
return;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that could not be checksummed due to
+ * arrival speed; in this case the stack can compute the
+ * csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver; hardware does not do it for us.
+ * Since the L3L4P bit was set we assume a valid IHL value (>=5),
+ * so the total length of the IPv4 header is IHL*4 bytes.
+ * The UDP_0 bit *may* be set if the *inner* header is UDP.
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |=
+ ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT;
- pf->ptp_tx_start = jiffies;
- schedule_work(&pf->ptp_tx_work);
-
return 1;
}
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d5349698e513..0277894fe1c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,7 +27,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -122,11 +117,11 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
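
Worked example under the halved per-descriptor cap: a 9000-byte fragment now costs TXD_USE_COUNT(9000) = DIV_ROUND_UP(9000, 8192) = 2 descriptors, and the per-packet worst case becomes one descriptor per fragment plus 4 slots of slack instead of scaling with PAGE_SIZE.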
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -184,7 +179,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -200,12 +194,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968fe557f..9d39ff23c5fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,12 +36,10 @@
/* Device IDs */
#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_A 0x157F
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
@@ -60,8 +58,8 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
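
At the new 1 usec resolution, the 18000 ms maximum NVM timeout above converts to I40E_MS_TO_GTIME(18000) = 18,000,000 GTIME ticks; the old 2 usec macro would have yielded half that.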
/* forward declaration */
struct i40e_hw;
@@ -167,6 +165,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -409,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -488,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -507,9 +506,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
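
Deriving the new mask width: UDP_0 occupies bit 18, so I40E_RX_DESC_STATUS_LAST evaluates to 19 and the rebuilt mask is (1 << 19) - 1 = 0x7FFFF, four bits wider than the old hard-coded 0x7FFF. A compile-time check could pin the relationship down (sketch only; BUILD_BUG_ON() needs function scope):

	BUILD_BUG_ON(I40E_RXD_QW1_STATUS_MASK !=
		     (0x7FFFFUL << I40E_RXD_QW1_STATUS_SHIFT));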
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
@@ -537,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -658,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -955,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -962,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1015,9 +1023,12 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
/* EEE LPI */
- bool tx_lpi_status;
- bool rx_lpi_status;
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
u64 tx_lpi_count; /* etlpic */
u64 rx_lpi_count; /* erlpic */
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 22a1b69cd646..70951d2edcad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7f7d29..f5b9d2062573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
/***********************misc routines*****************************/
/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+}
+
+/**
* i40e_vc_isvalid_vsi_id
* @vf: pointer to the vf info
* @vsi_id: vf relative vsi id
@@ -230,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
- tx_ctx.head_wb_ena = 1;
- tx_ctx.head_wb_addr = info->dma_ring_addr +
- (info->ring_len * sizeof(struct i40e_tx_desc));
+ tx_ctx.head_wb_ena = info->headwb_enabled;
+ tx_ctx.head_wb_addr = info->dma_headwb_addr;
/* clear the context in the HMC */
ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -336,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
rx_ctx.tphhead_ena = 1;
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ vf->tx_rate / 50, 0, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
+ }
+
error_alloc_vsi_res:
return ret;
}
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -867,6 +898,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
ret = -ENOMEM;
goto err_alloc;
}
+ pf->vf = vfs;
/* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) {
@@ -876,13 +908,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
/* vf resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
- pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
i40e_enable_pf_switch_lb(pf);
@@ -951,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
return i40e_pci_sriov_enable(pdev, num_vfs);
- i40e_free_vfs(pf);
+ if (!i40e_vfs_are_assigned(pf)) {
+ i40e_free_vfs(pf);
+ } else {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -2022,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ true, false);
- /* add the new mac address */
- f = i40e_add_filter(vsi, mac, 0, true, false);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add VF ucast filter\n");
- ret = -ENOMEM;
- goto error_param;
- }
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2040,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EIO;
goto error_param;
}
- memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ ether_addr_copy(vf->default_lan_addr.addr, mac);
vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2088,18 +2123,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
+ /* Administrator error - knock the VF offline until the
+ * network is reconfigured correctly and the VF driver is
+ * reloaded.
+ */
+ i40e_vc_disable_vf(pf, vf);
+ }
/* Check for condition where there was already a port VLAN ID
* filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
* Before deleting all the old VLAN filters we must add new ones
* with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
* MAC addresses deleted.
*/
- if (!(vlan_id || qos) && vsi->info.pvid)
+ if ((!(vlan_id || qos) ||
+ (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
if (vsi->info.pvid) {
@@ -2150,6 +2195,8 @@ error_pvid:
return ret;
}
+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
@@ -2158,9 +2205,76 @@ error_pvid:
*
* configure vf tx rate
**/
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int speed = 0;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (min_tx_rate) {
+ dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+ max_tx_rate, vf->vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
+ dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = 50;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
+ ret);
+ ret = -EIO;
+ goto error;
+ }
+ vf->tx_rate = max_tx_rate;
+error:
+ return ret;
}
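
The credit math above is deliberately coarse: credits are 50 Mbps units, so a 300 Mbps request programs 300 / I40E_BW_CREDIT_DIVISOR = 6 credits, and any nonzero request under 50 Mbps is first raised to the 50 Mbps floor. A condensed sketch of the conversion (illustrative only):

	static u16 example_bw_credits(int max_tx_rate)
	{
		if (max_tx_rate > 0 && max_tx_rate < 50)
			max_tx_rate = 50;	/* minimum usable rate */
		return max_tx_rate / I40E_BW_CREDIT_DIVISOR;
	}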
/**
@@ -2200,10 +2314,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
I40E_VLAN_PRIORITY_SHIFT;
+ if (vf->link_forced == false)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up == true)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->spoofchk = vf->spoofchk;
ret = 0;
error_param:
@@ -2270,3 +2392,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
error_out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vf = &(pf->vf[vf_id]);
+
+ if (enable == vf->spoofchk)
+ goto out;
+
+ vf->spoofchk = enable;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (enable)
+ ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+ ret);
+ ret = -EIO;
+ }
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f396d5..63e7e0d81ad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,8 +98,10 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
+ bool spoofchk;
};
void i40e_free_vfs(struct i40e_pf *pf);
@@ -115,10 +117,12 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index e09be37a07a8..3a423836a565 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce95936e..eb67cce3e8f9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -28,6 +31,16 @@
#include "i40e_prototype.h"
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
+ (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -276,8 +289,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -286,6 +302,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
@@ -294,7 +311,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
@@ -303,8 +327,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -313,6 +340,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
@@ -321,10 +349,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
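
Editor's note: both config functions above now verify their register writes by reading one register back. The standalone sketch below models that write-then-read-back check; reg_write() and reg_read() are hypothetical stand-ins for the driver's wr32()/rd32() over a fake register file.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_bar[16]; /* pretend MMIO space */

static void reg_write(unsigned idx, uint32_t val) { fake_bar[idx] = val; }
static uint32_t reg_read(unsigned idx) { return fake_bar[idx]; }

/* Returns 0 on success, -1 if the device did not latch the address */
static int config_queue_base(unsigned bal_reg, uint64_t dma_addr)
{
	reg_write(bal_reg, (uint32_t)dma_addr); /* low 32 bits of ring base */

	/* check one register to verify that config was applied */
	if (reg_read(bal_reg) != (uint32_t)dma_addr)
		return -1;
	return 0;
}

int main(void)
{
	printf("config %s\n", config_queue_base(3, 0x12345000ULL) ? "failed" : "ok");
	return 0;
}
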
/**
@@ -372,7 +407,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -429,7 +466,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -659,6 +698,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -786,6 +831,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -880,6 +928,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
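
Editor's note: the three hunks above implement a busy handshake for NVM operations: sending an erase/update sets nvm_busy, any further NVM op is rejected while it is set, and cleaning the matching completion from the receive queue clears it. A toy model of that state machine, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

enum opcode { OPC_NVM_ERASE = 0x0702, OPC_NVM_UPDATE = 0x0703, OPC_OTHER = 0x0001 };

static bool nvm_busy;

static bool is_nvm_op(enum opcode op)
{
	return op == OPC_NVM_ERASE || op == OPC_NVM_UPDATE;
}

static int send_command(enum opcode op)
{
	if (is_nvm_op(op) && nvm_busy)
		return -1;          /* reject: NVM operation already in flight */
	if (is_nvm_op(op))
		nvm_busy = true;    /* set on the send path */
	return 0;
}

static void clean_completion(enum opcode op)
{
	if (is_nvm_op(op))
		nvm_busy = false;   /* cleared when the completion is cleaned */
}

int main(void)
{
	printf("%d\n", send_command(OPC_NVM_UPDATE)); /* 0  */
	printf("%d\n", send_command(OPC_NVM_ERASE));  /* -1 */
	clean_completion(OPC_NVM_UPDATE);
	printf("%d\n", send_command(OPC_NVM_ERASE));  /* 0  */
	return 0;
}
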
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d95cc..e3472c62e155 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6bd98a..e656ea7a7920 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -31,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
#define I40E_FW_API_VERSION_A0_MINOR 0x0000
struct i40e_aq_desc {
@@ -121,6 +124,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -180,9 +184,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +206,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -222,13 +224,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +274,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -339,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -678,7 +688,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1040,7 +1049,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1289,27 +1300,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1490,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
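
Editor's note: a hypothetical example of filling the new configure-partition-BW buffer, assuming firmware only updates PFs whose bit is set in pf_valid_bits (the struct mirrors the hunk above; the values are made up, and the real buffer is little-endian on the wire).

#include <stdint.h>
#include <string.h>

struct partition_bw_data {        /* mirrors i40e_aqc_configure_partition_bw_data */
	uint16_t pf_valid_bits;   /* __le16 on the wire */
	uint8_t  min_bw[16];      /* guaranteed bandwidth per PF */
	uint8_t  max_bw[16];      /* bandwidth limit per PF */
};

static void set_pf_bw(struct partition_bw_data *d, unsigned pf,
		      uint8_t min, uint8_t max)
{
	d->pf_valid_bits |= 1u << pf; /* only flagged PFs are updated */
	d->min_bw[pf] = min;
	d->max_bw[pf] = max;
}

int main(void)
{
	struct partition_bw_data d;

	memset(&d, 0, sizeof(d));
	set_pf_bw(&d, 0, 10, 50); /* PF0: guarantee 10, cap 50 */
	set_pf_bw(&d, 2, 20, 80); /* PF2: guarantee 20, cap 80 */
	return 0;
}
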
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1539,6 +1539,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1551,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1583,11 +1588,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1698,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1747,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1789,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -1948,19 +1999,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
+ u8 reserved0[3];
u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
-#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
-#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
- u8 variable_udp_length;
-#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
-#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
- u8 udp_key_index;
-#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
-#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
- u8 reserved[10];
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index d8654fb9e525..8e6a6dd9212b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index ae084378faab..a43155afdbe2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -40,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_SFP_X710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_KX_D:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
@@ -130,7 +131,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
}
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index cb97b3eed440..a2ad9a4e399d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -160,11 +163,6 @@ struct i40e_hmc_info {
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -223,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca26d0b..d6f762241537 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 622f373b745d..21a91b14bf81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 97ab8c2b76f8..849edcc2e398 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 30af953cf106..369839655818 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -1337,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1364,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1586,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7c08cc2e339b..7fa7a41915c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b9f50f40abe1..48ebb6cd69f2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -725,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -739,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -778,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
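
Editor's note: the rewrite above turns one catch-all error test into a ladder of early returns with a shared checksum_fail label. The condensed model below captures that decision order (it omits the VXLAN outer-UDP software verification for brevity); the field names are simplified stand-ins for the descriptor bits.

#include <stdbool.h>

enum verdict { LEAVE_NONE, MARK_UNNECESSARY, COUNT_ERROR };

struct rx_info {
	bool rxcsum_enabled, l3l4p, known_ptype;
	bool ipv4, ipv6, tcp_inner;
	bool err_ip, err_eip, err_l4, err_pprs, ipv6_exthdr;
};

static enum verdict rx_checksum(const struct rx_info *r)
{
	if (!r->rxcsum_enabled || !r->l3l4p || !r->known_ptype)
		return LEAVE_NONE;          /* stack computes the csum */
	if (r->ipv4 && (r->err_ip || r->err_eip))
		return COUNT_ERROR;         /* inner or outer IPv4 csum bad */
	if (r->ipv6 && r->tcp_inner && r->err_l4 && r->ipv6_exthdr)
		return LEAVE_NONE;          /* non-fatal: ext headers confuse hw */
	if (r->err_l4)
		return COUNT_ERROR;         /* genuine L4 checksum error */
	if (r->err_pprs)
		return LEAVE_NONE;          /* hw could not keep up; punt to stack */
	return MARK_UNNECESSARY;
}

int main(void)
{
	struct rx_info r = { .rxcsum_enabled = true, .l3l4p = true,
			     .known_ptype = true, .ipv4 = true };
	return rx_checksum(&r) == MARK_UNNECESSARY ? 0 : 1;
}
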
/**
@@ -953,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1508,9 +1547,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1519,12 +1556,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
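
Editor's note: a worked example of the descriptor-count math above, assuming I40E_MAX_DATA_PER_TXD is now 8192 (see the i40e_txrx.h hunk below): a 9000-byte buffer needs DIV_ROUND_UP(9000, 8192) = 2 descriptors, and the final check reserves 4 gap descriptors plus 1 context descriptor. The frag sizes here are made up.

#include <stdio.h>

#define MAX_DATA_PER_TXD 8192
#define TXD_USE_COUNT(S) (((S) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int frag_sizes[] = { 4096, 9000 }; /* hypothetical frags */
	unsigned int count = 0, f;

	for (f = 0; f < 2; f++)
		count += TXD_USE_COUNT(frag_sizes[f]);
	count += TXD_USE_COUNT(9000);               /* linear head */

	/* prints "descriptors: 5, reserve: 10" */
	printf("descriptors: %u, reserve: %u\n", count, count + 4 + 1);
	return 0;
}
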
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 10bf49e18d7f..30d248bc5d19 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -24,7 +27,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -119,11 +117,11 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -180,7 +178,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -196,12 +193,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b3381edd..d3cf5a69de54 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -32,13 +35,11 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_A 0x157F
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
@@ -57,8 +58,8 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
/* forward declaration */
struct i40e_hw;
@@ -101,15 +102,6 @@ enum i40e_debug_mask {
I40E_DEBUG_ALL = 0xFFFFFFFF
};
-/* PCI Bus Info */
-#define I40E_PCI_LINK_WIDTH_1 0x10
-#define I40E_PCI_LINK_WIDTH_2 0x20
-#define I40E_PCI_LINK_WIDTH_4 0x40
-#define I40E_PCI_LINK_WIDTH_8 0x80
-#define I40E_PCI_LINK_SPEED_2500 0x1
-#define I40E_PCI_LINK_SPEED_5000 0x2
-#define I40E_PCI_LINK_SPEED_8000 0x3
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -173,6 +165,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -415,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -494,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -513,9 +506,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
@@ -543,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -664,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -961,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -968,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1021,9 +1023,12 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
/* EEE LPI */
- bool tx_lpi_status;
- bool rx_lpi_status;
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
u64 tx_lpi_count; /* etlpic */
u64 rx_lpi_count; /* erlpic */
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index ccf45d04b7ef..cd18d5689006 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -338,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 807807d62387..30ef519d4b91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -77,7 +80,7 @@ struct i40e_vsi {
#define I40EVF_MIN_TXD 64
#define I40EVF_MAX_RXD 4096
#define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
@@ -193,10 +196,12 @@ struct i40evf_adapter {
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
u32 tx_timeout_count;
struct list_head mac_filter_list;
+ u32 tx_desc_count;
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
u64 hw_csum_rx_error;
+ u32 rx_desc_count;
int num_msix_vectors;
struct msix_entry *msix_entries;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1ce179c..60407a9df0c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -44,8 +47,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
I40EVF_STAT("rx_discards", current_stats.rx_discards),
- I40EVF_STAT("rx_errors", current_stats.rx_errors),
- I40EVF_STAT("rx_missed", current_stats.rx_missed),
I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
@@ -56,10 +57,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
};
#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN \
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
(((struct i40evf_adapter *) \
- netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
-#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+ netdev_priv(_dev))->vsi_res->num_queue_pairs \
+ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
@@ -75,7 +78,7 @@ static int i40evf_get_settings(struct net_device *netdev,
/* In the future the VF will be able to query the PF for
* some information - for now use a dummy value
*/
- ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->supported = 0;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = PORT_NONE;
@@ -94,9 +97,9 @@ static int i40evf_get_settings(struct net_device *netdev,
static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
- return I40EVF_STATS_LEN;
+ return I40EVF_STATS_LEN(netdev);
else
- return -ENOTSUPP;
+ return -EINVAL;
}
/**
@@ -219,13 +222,11 @@ static void i40evf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_ring *tx_ring = adapter->tx_rings[0];
- struct i40e_ring *rx_ring = adapter->rx_rings[0];
ring->rx_max_pending = I40EVF_MAX_RXD;
ring->tx_max_pending = I40EVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_desc_count;
+ ring->tx_pending = adapter->tx_desc_count;
}
/**
@@ -241,7 +242,6 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
- int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -257,17 +257,16 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->tx_rings[0]->count) &&
- (new_rx_count == adapter->rx_rings[0]->count))
+ if ((new_tx_count == adapter->tx_desc_count) &&
+ (new_rx_count == adapter->rx_desc_count))
return 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
- adapter->tx_rings[0]->count = new_tx_count;
- adapter->rx_rings[0]->count = new_rx_count;
- }
+ adapter->tx_desc_count = new_tx_count;
+ adapter->rx_desc_count = new_rx_count;
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
+
return 0;
}
@@ -290,14 +289,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
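
Editor's note: get_coalesce now reports the adaptive state through use_adaptive_{rx,tx}_coalesce instead of overloading the usec fields with a magic 1, because the driver stores the adaptive flag as a bit inside the same word as the interval. A minimal sketch of that flag-in-value encoding, assuming the dynamic flag is bit 15 as I40E_ITR_DYNAMIC is in the driver:

#include <stdint.h>
#include <stdio.h>

#define ITR_DYNAMIC 0x8000u /* stand-in for I40E_ITR_DYNAMIC */

int main(void)
{
	uint16_t itr_setting = ITR_DYNAMIC | 50; /* adaptive, 50 usec base */

	unsigned adaptive = !!(itr_setting & ITR_DYNAMIC);
	unsigned usecs    = itr_setting & ~ITR_DYNAMIC;

	printf("use_adaptive=%u coalesce_usecs=%u\n", adaptive, usecs);
	return 0;
}
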
@@ -318,54 +316,361 @@ static int i40evf_set_coalesce(struct net_device *netdev,
struct i40e_q_vector *q_vector;
int i;
- if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
- vsi->work_limit = ec->tx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ else if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ else
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ else if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+/**
+ * i40evf_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* We always hash on IP src and dest addresses */
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
- | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ case UDP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+
+ case TCP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
+ case UDP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
}
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
+ return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->vsi_res->num_queue_pairs;
+ ret = 0;
break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
- | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ case ETHTOOL_GRXFH:
+ ret = i40evf_get_rss_hash_opts(adapter, cmd);
break;
default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
return -EINVAL;
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
}
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = adapter->q_vector[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
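The HENA handling above hinges on one layout detail: the VF's hash-enable state is a single 64-bit PCTYPE bitmap split across two 32-bit registers. A standalone sketch of that split read-modify-write, with accessor callbacks standing in for rd32/wr32:

#include <stdint.h>

static uint64_t hena_read(uint32_t (*rd)(unsigned int),
			  unsigned int lo_reg, unsigned int hi_reg)
{
	/* Low word in HENA(0), high word in HENA(1). */
	return (uint64_t)rd(lo_reg) | ((uint64_t)rd(hi_reg) << 32);
}

static void hena_write(void (*wr)(unsigned int, uint32_t),
		       unsigned int lo_reg, unsigned int hi_reg,
		       uint64_t hena)
{
	wr(lo_reg, (uint32_t)hena);
	wr(hi_reg, (uint32_t)(hena >> 32));
}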
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
+ return 0;
+}
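The byte-twiddling in get/set_rxfh follows from the LUT layout: each 32-bit HLUT register carries four one-byte queue indices, least-significant byte first. The same packing in isolation, as a sketch rather than driver code:

#include <stdint.h>

static void hlut_unpack(uint32_t reg, uint8_t entries[4])
{
	int k;

	for (k = 0; k < 4; k++)
		entries[k] = (reg >> (8 * k)) & 0xff;
}

static uint32_t hlut_pack(const uint8_t entries[4])
{
	uint32_t reg = 0;
	int k;

	for (k = 0; k < 4; k++)
		reg |= (uint32_t)entries[k] << (8 * k);
	return reg;
}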
+
+/**
+ * i40evf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = indir[j++];
+ hlut_val |= indir[j++] << 8;
+ hlut_val |= indir[j++] << 16;
+ hlut_val |= indir[j++] << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
}
return 0;
}
-static struct ethtool_ops i40evf_ethtool_ops = {
+static const struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -378,6 +683,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_rxnfc = i40evf_get_rxnfc,
+ .set_rxnfc = i40evf_set_rxnfc,
+ .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+ .get_rxfh = i40evf_get_rxfh,
+ .set_rxfh = i40evf_set_rxfh,
+ .get_channels = i40evf_get_channels,
};
/**
@@ -389,5 +700,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
**/
void i40evf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+ netdev->ethtool_ops = &i40evf_ethtool_ops;
}
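The two hunks above reflect the tree-wide removal of SET_ETHTOOL_OPS() in the 3.16 cycle: the ops table becomes const and is assigned directly. The general pattern, with hypothetical foo_ names:

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link = ethtool_op_get_link,
	/* ... remaining callbacks ... */
};

static void foo_set_ethtool_ops(struct net_device *netdev)
{
	/* Direct assignment; the removed macro expanded to exactly this. */
	netdev->ethtool_ops = &foo_ethtool_ops;
}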
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548fde0d..7fc5f3b5d6bf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -25,13 +28,15 @@
#include "i40e_prototype.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.16"
+#define DRV_VERSION "0.9.34"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -167,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -657,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
f = i40evf_find_vlan(adapter, vlan);
if (NULL == f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f) {
- dev_info(&adapter->pdev->dev,
- "%s: no memory for new VLAN filter\n",
- __func__);
+ if (NULL == f)
return NULL;
- }
+
f->vlan = vlan;
INIT_LIST_HEAD(&f->list);
@@ -688,7 +689,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- return;
}
/**
@@ -767,14 +767,12 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
if (NULL == f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (NULL == f) {
- dev_info(&adapter->pdev->dev,
- "%s: no memory for new filter\n", __func__);
clear_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section);
return NULL;
}
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
list_add(&f->list, &adapter->mac_filter_list);
f->add = true;
@@ -807,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
return (f == NULL) ? -ENOMEM : 0;
@@ -841,7 +838,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
bool found = false;
- if (f->macaddr[0] & 0x01) {
+ if (is_multicast_ether_addr(f->macaddr)) {
netdev_for_each_mc_addr(mca, netdev) {
if (ether_addr_equal(mca->addr, f->macaddr)) {
found = true;
@@ -970,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
+ if (adapter->state == __I40EVF_DOWN)
+ return;
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1027,30 +1027,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold) {
- dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
+ if (err < 0) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = -EIO;
- } else {
- /* Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NONQ_VECS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return err;
}
- return err;
+
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = err;
+ return 0;
}
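The conversion above replaces the old retry loop with pci_enable_msix_range(), which either grants between min and max vectors in a single call or fails with a negative errno. A condensed sketch of the pattern, in a hypothetical driver context:

#include <linux/pci.h>

static int foo_acquire_vectors(struct pci_dev *pdev,
			       struct msix_entry *entries,
			       int min_vecs, int max_vecs)
{
	int nvec;

	/* Returns the vector count actually granted, or -errno. */
	nvec = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (nvec < 0)
		return nvec;	/* not even min_vecs were available */

	return nvec;		/* caller sizes per-vector state to this */
}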
/**
@@ -1096,14 +1087,14 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->queue_index = i;
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
- tx_ring->count = I40EVF_DEFAULT_TXD;
+ tx_ring->count = adapter->tx_desc_count;
adapter->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
- rx_ring->count = I40EVF_DEFAULT_RXD;
+ rx_ring->count = adapter->rx_desc_count;
adapter->rx_rings[i] = rx_ring;
}
@@ -1141,9 +1132,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter.
- */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) {
@@ -1183,7 +1171,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, 64);
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
}
@@ -1236,8 +1224,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-
- return;
}
/**
@@ -1309,7 +1295,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- dev_info(&adapter->pdev->dev, "Checking for redemption\n");
if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1340,8 +1325,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
- dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
- dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_pending = 0;
adapter->aq_required = 0;
@@ -1413,7 +1397,7 @@ restart_watchdog:
}
/**
- * i40evf_configure_rss - increment to next available tx queue
+ * next_queue - increment to next available tx queue
* @adapter: board private structure
* @j: queue counter
*
@@ -1504,15 +1488,12 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val != I40E_VFR_VFACTIVE) {
- dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+ if (rstat_val != I40E_VFR_VFACTIVE)
break;
- } else {
+ else
msleep(I40EVF_RESET_WAIT_MS);
- }
}
if (i == I40EVF_RESET_WAIT_COUNT) {
- dev_err(&adapter->pdev->dev, "Reset was not detected\n");
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
goto continue_reset; /* act like the reset happened */
}
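Both reset waits in this function are instances of the same bounded poll: re-read a status register until it reaches the wanted state or the retry budget runs out. As a generic sketch, all names hypothetical:

static int poll_status(unsigned int (*read_status)(void), unsigned int want,
		       int max_tries, void (*sleep_once)(void))
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (read_status() == want)
			return 0;	/* reached the wanted state */
		sleep_once();
	}
	return -1;			/* budget exhausted: treat as timeout */
}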
@@ -1521,22 +1502,24 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_VFACTIVE) {
- dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+ if (rstat_val == I40E_VFR_VFACTIVE)
break;
- } else {
+ else
msleep(I40EVF_RESET_WAIT_MS);
- }
}
if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
rstat_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev))
- i40evf_close(adapter->netdev);
-
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1591,7 +1574,7 @@ continue_reset:
}
return;
reset_err:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(adapter->netdev);
}
@@ -1607,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
i40e_status ret;
+ u32 val, oldval;
u16 pending;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1614,11 +1598,9 @@ static void i40evf_adminq_task(struct work_struct *work)
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
- if (!event.msg_buf) {
- dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
- __func__);
+ if (!event.msg_buf)
return;
- }
+
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1636,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
}
} while (pending);
+ /* check for error indications */
+ val = rd32(hw, hw->aq.arq.len);
+ oldval = val;
+ if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.arq.len, val);
+
+ val = rd32(hw, hw->aq.asq.len);
+ oldval = val;
+ if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.asq.len, val);
+
/* re-enable Admin queue interrupt cause */
i40evf_misc_irq_enable(adapter);
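Each of the six checks added above follows one shape: test a latched error bit, report it, strip it, and write the register back only when a bit was actually cleared. Reduced to a helper (a sketch; the driver open-codes it):

#include <stdint.h>
#include <stdio.h>

static uint32_t clear_if_set(uint32_t val, uint32_t mask, const char *what)
{
	if (val & mask) {
		fprintf(stderr, "%s detected\n", what);
		val &= ~mask;
	}
	return val;	/* caller writes back only if the value changed */
}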
@@ -1673,6 +1690,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = adapter->tx_desc_count;
err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
if (!err)
continue;
@@ -1700,6 +1718,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->count = adapter->rx_desc_count;
err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
if (!err)
continue;
@@ -1804,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN)
return 0;
- /* signal that we are down to the interrupt handler */
- adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
+ adapter->state = __I40EVF_DOWN;
i40evf_free_traffic_irqs(adapter);
i40evf_free_all_tx_resources(adapter);
@@ -1848,8 +1866,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
WARN_ON(in_interrupt());
- adapter->state = __I40EVF_RESETTING;
-
i40evf_down(adapter);
/* allocate transmit descriptors */
@@ -1872,7 +1888,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
return;
err_reinit:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -1967,7 +1983,7 @@ static void i40evf_init_task(struct work_struct *work)
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
err);
goto err;
}
@@ -1993,14 +2009,14 @@ static void i40evf_init_task(struct work_struct *work)
break;
case __I40EVF_INIT_VERSION_CHECK:
if (!i40evf_asq_done(hw)) {
- dev_err(&pdev->dev, "Admin queue command never completed.\n");
+ dev_err(&pdev->dev, "Admin queue command never completed\n");
goto err;
}
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
+ dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
err);
goto err;
}
@@ -2074,12 +2090,12 @@ static void i40evf_init_task(struct work_struct *work)
netdev->hw_features &= ~NETIF_F_RXCSUM;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2087,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
if (NULL == f)
goto err_sw_init;
- memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
@@ -2098,6 +2114,8 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
+ adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
if (err)
goto err_sw_init;
@@ -2114,8 +2132,10 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
- adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
adapter->vsi.netdev = adapter->netdev;
if (!adapter->netdev_registered) {
@@ -2128,7 +2148,7 @@ static void i40evf_init_task(struct work_struct *work)
netif_tx_stop_all_queues(netdev);
- dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
@@ -2152,12 +2172,11 @@ err_alloc:
err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
- return;
}
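The new ITR defaults above encode two things in one field: the interval in microseconds in the low bits, and a flag bit marking the rate as dynamically adapted. A sketch of that encoding; the names are hypothetical and the 0x8000 flag value is an assumption matching the conventional I40E_ITR_DYNAMIC definition:

#include <stdint.h>

#define ITR_DYNAMIC_FLAG 0x8000u	/* assumed I40E_ITR_DYNAMIC value */

static inline int itr_is_dynamic(uint16_t setting)
{
	return !!(setting & ITR_DYNAMIC_FLAG);
}

static inline uint16_t itr_usecs(uint16_t setting)
{
	return setting & (uint16_t)~ITR_DYNAMIC_FLAG;
}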
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e294f012647d..2dc0bac76717 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -216,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_ATOMIC);
- if (!vqci) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vqci)
return;
- }
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -232,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i]->count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+ vqpi->txq.headwb_enabled = 1;
+ vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
+ (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
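Head writeback, enabled above, parks the hardware's TX head pointer in host memory placed directly after the descriptor ring, which is why the writeback address is simply ring base plus ring length times descriptor size. In isolation, as a sketch:

#include <stdint.h>
#include <stddef.h>

static inline uint64_t headwb_dma_addr(uint64_t ring_base,
				       uint32_t ring_len, size_t desc_size)
{
	/* The writeback slot begins at the first byte past the ring. */
	return ring_base + (uint64_t)ring_len * desc_size;
}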
@@ -329,11 +333,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
(adapter->num_msix_vectors *
sizeof(struct i40e_virtchnl_vector_map));
vimi = kzalloc(len, GFP_ATOMIC);
- if (!vimi) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vimi)
return;
- }
vimi->num_vectors = adapter->num_msix_vectors;
/* Queue vectors first */
@@ -390,7 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -399,16 +400,14 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_ATOMIC);
- if (!veal) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!veal)
return;
- }
+
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
- memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
f->add = false;
}
@@ -454,7 +453,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -462,16 +461,14 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
veal = kzalloc(len, GFP_ATOMIC);
- if (!veal) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!veal)
return;
- }
+
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
- memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
list_del(&f->list);
kfree(f);
@@ -518,7 +515,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -526,11 +523,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vvfl)
return;
- }
+
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -580,7 +575,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -588,11 +583,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vvfl)
return;
- }
+
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -721,7 +714,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_opcode != adapter->current_op) {
- dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
__func__, adapter->current_op, v_opcode);
/* We're probably completely screwed at this point, but clear
* the current op and try to carry on....
@@ -730,7 +723,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
@@ -745,9 +738,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
stats->tx_broadcast;
adapter->net_stats.rx_bytes = stats->rx_bytes;
adapter->net_stats.tx_bytes = stats->tx_bytes;
- adapter->net_stats.rx_errors = stats->rx_errors;
adapter->net_stats.tx_errors = stats->tx_errors;
- adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.rx_dropped = stats->rx_discards;
adapter->net_stats.tx_dropped = stats->tx_discards;
adapter->current_stats = *stats;
}
@@ -781,7 +773,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
break;
default:
- dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
__func__, v_opcode);
break;
} /* switch v_opcode */
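The MAC- and VLAN-list senders above all share the same clamp: if header plus count entries overflows the fixed admin-queue buffer, the count is cut to what fits and the length recomputed. A freestanding sketch of that arithmetic:

#include <stddef.h>

static size_t clamp_list_len(size_t buf_size, size_t hdr_size,
			     size_t entry_size, size_t *count)
{
	size_t len = hdr_size + *count * entry_size;

	if (len > buf_size) {
		*count = (buf_size - hdr_size) / entry_size;
		len = hdr_size + *count * entry_size;
	}
	return len;
}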
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12e775..a2db388cc31e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_82575
* e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -159,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
ret_val = igb_check_for_link_82575(hw);
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -526,7 +522,7 @@ out:
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
u32 link_mode = 0;
@@ -1008,7 +1004,6 @@ out:
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1032,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
 data &= ~E1000_82580_PM_SPD;
 }
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1052,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1077,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
}
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1180,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -1203,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = 0;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
if (hw->bus.func == 1)
@@ -1216,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
while (timeout) {
if (rd32(E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout)
@@ -1227,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_igp_3))
igb_phy_init_script_igp3(hw);
- return ret_val;
+ return 0;
}
/**
@@ -1269,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
+ &duplex);
/* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
@@ -1316,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
@@ -1411,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1436,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* set the completion timeout for interface */
ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
+ if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
hw_dbg("Masking off all interrupts\n");
wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = rd32(E1000_CTRL);
@@ -1622,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 data;
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1676,7 +1668,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ hw_dbg("NVM Read Error\n");
return ret_val;
}
@@ -1689,7 +1681,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1917,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1945,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
wr32(E1000_RCTL, temp_rctl);
wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
wrfl();
- msleep(2);
+ usleep_range(2000, 3000);
/* Enable RX queues that were previously enabled and restore our
* previous state
@@ -2005,14 +1997,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
* 16ms to 55ms
*/
ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2233,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
@@ -2259,7 +2251,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Add delay to insure DEV_RST has time to complete */
if (global_device_reset)
- msleep(5);
+ usleep_range(5000, 6000);
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
@@ -2436,8 +2428,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
@@ -2447,8 +2438,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
@@ -2525,7 +2515,7 @@ out:
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
if (ret_val)
@@ -2559,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
**/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 ipcnfg, eeer;
if ((hw->mac.type < e1000_i350) ||
@@ -2593,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
rd32(E1000_EEER);
out:
- return ret_val;
+ return 0;
}
/**
@@ -2720,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
**/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2738,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2762,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
E1000_I2C_THERMAL_SENSOR_ADDR,
&data->sensor[i].temp);
}
- return status;
+ return 0;
}
/**
@@ -2774,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
**/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2800,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2831,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
low_thresh_delta;
}
}
- return status;
+ return 0;
}
#endif
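The msleep() to usleep_range() changes through this file follow the kernel's timer guidance: for waits of a few milliseconds, msleep() may oversleep by a jiffy or more, while usleep_range() gives the hrtimer core an explicit window and allows wakeup coalescing. The pattern, in a hypothetical helper:

#include <linux/delay.h>

static void foo_settle(void)
{
	/* Sleep between 1 ms and 2 ms instead of msleep(1). */
	usleep_range(1000, 2000);
}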
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be72416..b407c55738fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 hdr_info; /* Split Head, buf len */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf925ac72..2a8bb35c2df2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,39 +304,34 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* Timestamp in Rx buffer */
#define E1000_RXPBS_CFG_TS_EN 0x80000000
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -406,12 +398,12 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -467,7 +459,6 @@
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
-#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
@@ -1011,8 +1002,7 @@
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d170f2d..89925e405849 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
#include "e1000_mbx.h"
struct e1000_mac_operations {
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
#ifdef CONFIG_IGB_HWMON
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
#define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a170b90..337161f440dd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_i210
* e1000_i211
@@ -100,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
return -E1000_ERR_NVM;
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -142,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
while (i < timeout) {
@@ -187,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ while (igb_get_hw_semaphore_i210(hw))
; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -210,7 +207,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -220,7 +217,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_read_nvm_eerd(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -228,7 +225,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -253,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, k, eewr = 0;
u32 attempts = 100000;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -275,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
for (k = 0; k < attempts; k++) {
if (E1000_NVM_RW_REG_DONE &
rd32(E1000_SRWR)) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
}
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
@@ -310,7 +307,7 @@ out:
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -320,7 +317,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -328,7 +325,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -367,12 +364,12 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
hw_dbg("Read INVM Word 0x%02x = %x\n",
address, *data);
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
}
- if (status != E1000_SUCCESS)
+ if (status)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
@@ -388,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
u16 words __always_unused, u16 *data)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
@@ -398,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
&data[1]);
ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
&data[2]);
- if (ret_val != E1000_SUCCESS)
+ if (ret_val)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_INIT_CTRL_4:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_1_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_0_2_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_ID_LED_SETTINGS:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = ID_LED_RESERVED_FFFF;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -488,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have first version location used */
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
version = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have odd version location
@@ -506,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
(i != 1))) {
version = (*next_record & E1000_INVM_VER_FIELD_TWO)
>> 13;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have even version location
@@ -515,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
- if (status == E1000_SUCCESS) {
+ if (!status) {
invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
>> E1000_INVM_MAJOR_SHIFT;
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -533,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have image type in first location used */
if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
invm_ver->invm_img_type = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have image type in first location used */
@@ -542,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
@@ -558,10 +556,10 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
**/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
@@ -593,7 +591,7 @@ static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
**/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;
@@ -602,12 +600,12 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
* EEPROM read fails
*/
ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("EEPROM read failed\n");
goto out;
}
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
@@ -625,7 +623,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
checksum = (u16) NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
@@ -654,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
@@ -687,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
**/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u32 flup;
ret_val = igb_pool_flash_update_done_i210(hw);
@@ -700,7 +698,7 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
wr32(E1000_EECD, flup);
ret_val = igb_pool_flash_update_done_i210(hw);
- if (ret_val == E1000_SUCCESS)
+	if (!ret_val)
hw_dbg("Flash update complete\n");
else
hw_dbg("Flash update time out\n");
@@ -753,7 +751,7 @@ out:
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
u8 dev_addr, u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99a9813..9f34976687ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404db81a..2a88595f956c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
+ switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba8ee3a..ea24961b0d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b121771c31..162cc49345d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f5515e5a8..d20af6b2f581 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf82919c65..e8280d0d7f02 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
@@ -801,5 +798,4 @@ etrack_id:
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
}
- return;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b101170b17e..febc9cdb7391 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bbab7407..c1bb64d8366f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = igb_wait_autoneg(hw);
if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
+ hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
void igb_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
void igb_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c46a73..7af4ffab0285 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
#define GS40G_LINE_LB 0x4000
/* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e848e1..1cc4b1a7e597 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
@@ -195,6 +192,10 @@
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
: (0x0E03C + ((_n) * 0x40)))
+
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -301,9 +302,9 @@
#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
+ (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +359,7 @@
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 27130065d92a..06102d1f7c03 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570acbeea8..c737d1f40838 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for igb */
@@ -144,6 +141,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
+ u32 speed;
status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -218,13 +216,13 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (status & E1000_STATUS_LU) {
if ((status & E1000_STATUS_2P5_SKU) &&
!(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->speed = SPEED_2500;
+ speed = SPEED_2500;
} else if (status & E1000_STATUS_SPEED_1000) {
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
} else if (status & E1000_STATUS_SPEED_100) {
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
} else {
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
@@ -232,9 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
+ ethtool_cmd_speed_set(ecmd, speed);
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
@@ -286,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -399,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
};
@@ -1170,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82575, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
@@ -1206,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
+
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1390,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
@@ -1412,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1459,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1481,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1503,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1515,7 +1518,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1664,8 +1667,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
- (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
-
+ (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1949,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2413,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2425,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2730,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -2740,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
/* Check whether we support SFF-8472 or not */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
@@ -2772,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 *dataword;
u16 first_word, last_word;
int i = 0;
@@ -2791,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS) {
+ if (status) {
/* Error occurred while reading module */
kfree(dataword);
return -EIO;
@@ -2824,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
@@ -2870,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
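The get_rxfh_indir/set_rxfh_indir pair became get_rxfh/set_rxfh when the ethtool core merged the RSS indirection table and the hash key into a single callback pair; a table-only driver such as igb simply leaves the key pointer alone. A hedged sketch of the new callback shape, with a hypothetical example_priv in place of a real driver's private struct:

#include <linux/netdevice.h>

#define EXAMPLE_RETA_SIZE 128

struct example_priv {
	u32 rss_indir_tbl[EXAMPLE_RETA_SIZE];
};

/* Sketch: combined RSS callback; a table-only driver ignores *key. */
static int example_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
	struct example_priv *priv = netdev_priv(netdev);
	int i;

	if (indir)
		for (i = 0; i < EXAMPLE_RETA_SIZE; i++)
			indir[i] = priv->rss_indir_tbl[i];
	/* key left untouched: no driver-visible hash key on this device */
	return 0;
}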
@@ -3019,8 +3028,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
- .get_rxfh_indir = igb_get_rxfh_indir,
- .set_rxfh_indir = igb_set_rxfh_indir,
+ .get_rxfh = igb_get_rxfh,
+ .set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
@@ -3029,5 +3038,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
void igb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ netdev->ethtool_ops = &igb_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67acf96..44b6a68f1af7 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8440fa..f145adbb55ac 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
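DEFINE_PCI_DEVICE_TABLE was deprecated in favor of writing out what the macro expanded to, a const struct pci_device_id array. A minimal sketch of the open-coded form; the device IDs are illustrative only:

#include <linux/module.h>
#include <linux/pci.h>

/* Sketch: open-coded PCI ID table; the ID values are illustrative. */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1533) },	/* vendor-shorthand form */
	{ PCI_DEVICE(0x8086, 0x10c9) },	/* explicit vendor/device form */
	{ }				/* required zero terminator */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);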
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
+ pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
netdev->state, netdev->trans_start, netdev->last_rx);
}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ return !!(i2cctl & E1000_I2C_CLK_IN);
}
static const struct i2c_algo_bit_data igb_i2c_algo = {
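Replacing `(reg & MASK) != 0` with `!!(reg & MASK)` does not change behavior; `!!` is the kernel idiom for collapsing a masked register read into a strict 0/1, which reads as an explicit booleanization. A standalone illustration:

#include <stdio.h>

#define DATA_IN_BIT (1u << 5)	/* illustrative mask */

/* !! maps any nonzero bit pattern to exactly 1; zero stays 0 */
static int bit_set(unsigned int reg)
{
	return !!(reg & DATA_IN_BIT);
}

int main(void)
{
	printf("%d %d\n", bit_set(0xff), bit_set(0x00));	/* prints: 1 0 */
	return 0;
}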
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
-
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
+ /* Fall through */
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
+ /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
+ /* On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
*/
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
+
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wrfl();
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
+
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
+
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
igb_irq_disable(adapter);
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
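The msleep() to usleep_range() conversions follow the guidance in Documentation/timers/timers-howto.txt: for waits of roughly 10 us to 20 ms, msleep() can oversleep badly because it rounds up to jiffies, while usleep_range() is hrtimer-based and its min/max window lets the scheduler coalesce nearby wakeups. A sketch of the locking loop above in that style (kernel context assumed):

#include <linux/bitops.h>
#include <linux/delay.h>

/* Sketch: take a reset "lock" bit, sleeping ~1 ms per retry.
 * msleep(1) may round up to a whole jiffy (10-20 ms at HZ=100);
 * usleep_range(1000, 2000) keeps the wait near 1 ms and gives the
 * scheduler a window in which to batch timer expirations.
 */
static void example_lock_for_reset(unsigned long *state)
{
	while (test_and_set_bit(0, state))
		usleep_range(1000, 2000);
}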
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
break;
}
- return;
}
/**
@@ -2203,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
- return E1000_SUCCESS;
+ return 0;
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
@@ -2437,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
+ /* configure RXPBSIZE and TXPBSIZE */
+ if (hw->mac.type == e1000_i210) {
+ wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+ wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ }
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2529,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
@@ -3077,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -3248,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
* Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -3389,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
- adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
u32 vtctl = rd32(E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -4058,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = rd32(E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -4077,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
if (adapter->wvbr & (1 << j) ||
adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4216,15 @@ static void igb_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
+
hw->mac.ops.get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ E1000_THSTAT_LINK_THROTTLE))
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -4277,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ netdev_err(netdev, "The network adapter was stopped because it overheated\n");
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
@@ -4344,6 +4348,7 @@ static void igb_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
@@ -4483,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
itrval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
itrval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4679,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
return;
} else {
u8 l4_hdr = 0;
+
switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
*/
if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
unsigned short f;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
} else {
@@ -5140,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -5621,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
for (i = 0; i < adapter->vfs_allocated_count; i++) {
u32 vmolr = rd32(E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5770,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
@@ -5798,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
@@ -5902,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
-
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
@@ -6494,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
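Swapping the memcpy() for a structure assignment is behavior-preserving, but the assignment is type-checked: a size or type mismatch between the two pointers becomes a compile error rather than silent corruption. A standalone illustration with a cut-down buffer struct:

struct rx_buffer {
	void *page;
	unsigned long long dma;
	unsigned int page_offset;
};

void copy_buf(struct rx_buffer *dst, const struct rx_buffer *src)
{
	/* Equivalent to memcpy(dst, src, sizeof(*dst)), but the compiler
	 * rejects mismatched pointer types and can never copy a wrong size.
	 */
	*dst = *src;
}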
@@ -6963,6 +6973,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return (total_packets < budget);
+ return total_packets < budget;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -7873,7 +7884,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
}
}
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -7882,15 +7894,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (hw->mac.type != e1000_82576)
return -EOPNOTSUPP;
+ if (min_tx_rate)
+ return -EINVAL;
+
actual_link_speed = igb_link_mbps(adapter->link_speed);
if ((vf >= adapter->vfs_allocated_count) ||
(!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
- (tx_rate < 0) || (tx_rate > actual_link_speed))
+ (max_tx_rate < 0) ||
+ (max_tx_rate > actual_link_speed))
return -EINVAL;
adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vf_data[vf].tx_rate = (u16)tx_rate;
- igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+ adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+ igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
return 0;
}
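The .ndo_set_vf_tx_rate callback became .ndo_set_vf_rate, carrying both a minimum and a maximum rate in Mbps. Hardware like the 82576 can only enforce a ceiling, so the driver above rejects any nonzero minimum with -EINVAL and programs only the maximum. A sketch of that contract; example_num_vfs() and example_hw_set_rate_cap() are hypothetical stand-ins for driver internals:

#include <linux/netdevice.h>

/* Hypothetical helpers standing in for driver internals. */
int example_num_vfs(struct net_device *netdev);
int example_hw_set_rate_cap(struct net_device *netdev, int vf, int mbps);

static int example_ndo_set_vf_rate(struct net_device *netdev, int vf,
				   int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate)
		return -EINVAL;		/* hw enforces only a ceiling */
	if (vf < 0 || vf >= example_num_vfs(netdev) || max_tx_rate < 0)
		return -EINVAL;
	return example_hw_set_rate_cap(netdev, vf, max_tx_rate);
}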
@@ -7919,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
- return E1000_SUCCESS;
+ return 0;
}
static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7930,7 +7946,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7955,11 +7972,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
@@ -8049,6 +8068,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
+
wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
wr32(E1000_DMACR, 0);
}
@@ -8077,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
swfw_mask = E1000_SWFW_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -8088,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_ERR_I2C;
else {
*data = status;
- return E1000_SUCCESS;
+ return 0;
}
}
@@ -8113,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (!this_client)
return E1000_ERR_I2C;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -8121,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (status)
return E1000_ERR_I2C;
else
- return E1000_SUCCESS;
+ return 0;
}
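The recurring E1000_SUCCESS-to-0 substitutions in this series let every status check collapse from `if (status != E1000_SUCCESS)` to the idiomatic `if (status)`. A minimal standalone sketch of the zero-on-success convention:

#include <stdio.h>

#define ERR_I2C 2	/* illustrative error code; nonzero by convention */

/* Returns 0 on success, a nonzero error code otherwise. */
static int read_byte(unsigned int offset, unsigned char *out)
{
	if (offset > 0xff)
		return ERR_I2C;
	*out = 0x42;	/* stand-in for a real bus transaction */
	return 0;
}

int main(void)
{
	unsigned char b;

	if (read_byte(0x10, &b))	/* nonzero means failure */
		return 1;
	printf("0x%02x\n", b);
	return 0;
}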
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab25e49365f7..794c139f0cc0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -360,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
+
/**
- * igb_ptp_set_ts_config - control hardware time stamping
- * @netdev:
- * @ifreq:
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
- **/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+ struct hwtstamp_config *config)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config *config = &adapter->tstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
bool is_l2 = false;
u32 regval;
- if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
- return -EFAULT;
-
/* reserved for future extensions */
if (config->flags)
return -EINVAL;
@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH);
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igb_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
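After the refactor above, SIOCSHWTSTAMP validates and programs the hardware first, caches the accepted configuration only on success, and copies the (possibly coerced) config back so userspace learns which filter is actually in effect; igb, for instance, may widen a specific PTP filter to HWTSTAMP_FILTER_ALL on parts that timestamp every packet. A hedged userspace sketch of the ioctl round-trip, where fd is any open AF_INET datagram socket:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_rx_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;
	/* The driver may have upgraded the filter; cfg now holds the
	 * mode actually in effect (possibly HWTSTAMP_FILTER_ALL).
	 */
	printf("rx_filter in effect: %d\n", cfg.rx_filter);
	return 0;
}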
@@ -745,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
adapter->cc.mask = CLOCKSOURCE_MASK(64);
adapter->cc.mult = 1;
@@ -765,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -784,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
adapter->ptp_caps.settime = igb_ptp_settime_i210;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
/* Enable the timer functions by clearing bit 31. */
wr32(E1000_TSAUXC, 0x0);
break;
@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
wr32(E1000_IMS, E1000_IMS_TS);
}
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
return;
/* reset the tstamp_config */
- memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
switch (adapter->hw.mac.type) {
case e1000_82576:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07943f4..2178f87e9f61 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
static void igbvf_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- return;
}
static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+ netdev->ethtool_ops = &igbvf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2f8e36..b311e9e710d2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
void ixgb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+ netdev->ethtool_ops = &ixgb_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7d68e6..ac9f2148cdc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
struct vf_macvlans {
struct list_head l;
int vf;
- int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
@@ -363,7 +362,7 @@ struct ixgbe_ring_container {
for (pos = (head).ring; pos != NULL; pos = pos->next)
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- ? 8 : 1)
+ ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
/* MAX_Q_VECTORS of these are allocated,
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
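The new ixgbe_mac_addr table shadows the hardware RAR slots with a small state bitmask, distinguishing the permanent default address, entries dirtied since the last hardware sync, and entries currently claimed by a filter. A standalone sketch of how such flags combine, mirroring the three IXGBE_MAC_STATE_* bits:

#include <stdint.h>
#include <stdbool.h>

#define MAC_STATE_DEFAULT  0x1	/* the adapter's permanent address */
#define MAC_STATE_MODIFIED 0x2	/* needs to be written back to hw */
#define MAC_STATE_IN_USE   0x4	/* slot is allocated to a filter */

struct mac_entry {
	uint8_t  addr[6];
	uint16_t queue;
	uint16_t state;
};

/* Claim a free slot: mark it in use and dirty in one update. */
static bool claim_slot(struct mac_entry *e)
{
	if (e->state & MAC_STATE_IN_USE)
		return false;
	e->state |= MAC_STATE_IN_USE | MAC_STATE_MODIFIED;
	return true;
}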
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
+ struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
@@ -941,6 +957,7 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
}
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8946c1..15609331ec17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -41,10 +41,10 @@
#define IXGBE_82598_RX_PB_SIZE 512
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
int i;
bool link_up;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
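The validation loop added above enforces, per traffic class, that whenever Tx pause is in effect and a high water mark is configured, the low water mark is nonzero and strictly below the high mark; a zero or inverted low mark would leave the device asserting XOFF continuously. The same predicate factored into a standalone check, with illustrative names:

#include <stdbool.h>

#define MAX_TCS 8

/* Returns true when the per-TC XON/XOFF thresholds are sane. */
static bool fc_watermarks_valid(bool tx_pause,
				const unsigned int low[MAX_TCS],
				const unsigned int high[MAX_TCS])
{
	int i;

	for (i = 0; i < MAX_TCS; i++) {
		if (!(tx_pause && high[i]))
			continue;	/* class not using pause frames */
		if (!low[i] || low[i] >= high[i])
			return false;	/* would XOFF-flood the link */
	}
	return true;
}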
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete)
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
{
u32 links_reg;
u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ &adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
msleep(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- MDIO_MMD_PMAPMD,
- &link_reg);
+ MDIO_MMD_PMAPMD,
+ &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ MDIO_MMD_PMAPMD,
+ &adapt_comp_reg);
}
} else {
if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
/* Enable Tx Atlas so packets can be transmitted again */
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- analog_val);
+ analog_val);
}
/* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
+ 0);
return 0;
}
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 atlas_ctl;
IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
- return;
}
static struct ixgbe_mac_operations mac_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f32b3dd1ba8e..bc7c924240a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
!ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ &ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
+ &ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset = NULL;
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto setup_sfp_out;
/* PHY config will finish before releasing the semaphore */
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
+ ixgbe_link_speed *speed,
bool *autoneg)
{
s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed,
- autoneg_wait_to_complete);
+ highest_link_speed,
+ autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
/* Switch from 1G SFI to 10G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
(pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -1074,14 +1074,14 @@ out:
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -1224,7 +1224,7 @@ mac_reset_top:
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_AUTOC2_UPPER_MASK);
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
}
}
@@ -1246,7 +1246,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
/*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, "
- "aborting table re-initialization.\n");
+ hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* - write 0 to bit 8 of FDIRCMD register
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
- IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- ~IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
/*
* Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
/* Poll init-done after we write FDIRCTRL register */
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1453,7 +1453,7 @@ do { \
bucket_hash ^= hi_hash_dword >> n; \
else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
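Dropping the semicolon after `while (0)` is the point of the do/while wrapper: the macro body then acts as a single statement and the caller supplies the terminator. With the stray semicolon, the expansion of `if (...) MACRO(n); else ...` contains an extra empty statement and the else no longer parses. A standalone demonstration:

#include <stdio.h>

/* Correct: no trailing semicolon; the caller adds one. */
#define LOG_TWICE(x) do { printf("%d\n", (x)); printf("%d\n", (x)); } while (0)

int main(void)
{
	int v = 3;

	if (v > 0)
		LOG_TWICE(v);	/* expands to one statement plus this ';' */
	else			/* had the macro ended in "while (0);", an
				 * empty statement would sit before this else
				 * and the program would not compile */
		printf("non-positive\n");
	return 0;
}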
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
{
u64 fdirhashcmd;
u32 fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1579,7 +1579,7 @@ do { \
bucket_hash ^= lo_hash_dword >> n; \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
u32 mask = ntohs(input_mask->formatted.dst_port);
+
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
mask |= ntohs(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 core_ctl;
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
- (reg << 8));
+ (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7b100d..4e5385a2a465 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count);
+ u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
/* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw);
+ ret_val = ixgbe_setup_fc(hw);
+	if (ret_val)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return 0;
+out:
+ return ret_val;
}
/**
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
* Reads the part number string from the EEPROM.
**/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
eeprom->address_bits = 16;
else
eeprom->address_bits = 8;
- hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
+ eeprom->type, eeprom->word_size, eeprom->address_bits);
}
return 0;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
}
if (i == timeout) {
- hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
- "not granted.\n");
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
/*
* this release is particularly important because our attempts
* above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
- hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
- "not granted.\n");
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
+ IXGBE_EEPROM_OPCODE_BITS);
spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
* @count: number of bits to shift out
**/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count)
+ u16 count)
{
u32 eec;
u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* caller does not need checksum_val, the value can be NULL.
**/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val)
+ u16 *checksum_val)
{
s32 status;
u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Puts an ethernet address into a receive address register.
**/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
u32 rar_low, rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
- hw->mac.mc_filter_type);
+ hw->mac.mc_filter_type);
return 0;
}
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
u32 fcrtl, fcrth;
int i;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/* Negotiate the fc mode to use */
ixgbe_fc_autoneg(hw);
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
} else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
/* For informational purposes only */
if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
return 0;
@@ -2782,7 +2786,7 @@ out:
* get and set mac_addr routines.
**/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+ u16 *san_mac_offset)
{
s32 ret_val;
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
hw->mac.ops.set_lan_id(hw);
/* apply the port offset to the address offset */
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
&san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
s32 regindex;
u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* Ignore it. */
vfta_changed = false;
}
- }
- else
+ } else {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
}
if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
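[Editor's note] The key change in ixgbe_common.c is that flow-control low water marks become per traffic class (hw->fc.low_water[i]) and ixgbe_fc_enable_generic() now validates them per TC instead of only checking packet buffer 0. A minimal standalone sketch of the rule the new loop enforces — the struct and function names here are illustrative, not driver API:

#include <stdbool.h>

struct fc_cfg {
	unsigned int high_water[8];	/* per-TC thresholds */
	unsigned int low_water[8];
	bool tx_pause;			/* Tx pause frames enabled */
};

/* For every traffic class that transmits pause frames, the low water
 * mark must be nonzero and strictly below the high water mark; a zero
 * or inverted low water mark causes the hardware to flood XOFF frames. */
static bool fc_water_marks_valid(const struct fc_cfg *fc, int num_tc)
{
	for (int i = 0; i < num_tc; i++) {
		if (!fc->tx_pause || !fc->high_water[i])
			continue;	/* TC does not send pause frames */
		if (!fc->low_water[i] ||
		    fc->low_water[i] >= fc->high_water[i])
			return false;
	}
	return true;
}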
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40fb5537..2ae5d4b8fc93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
+ u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
+ u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
+ u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
+ u16 *wwpn_prefix);
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
return unlikely(!addr);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
}
#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
- u32 value;
-
- if (ixgbe_removed(reg_addr))
- return IXGBE_FAILED_READ_REG;
- value = readl(reg_addr + reg);
- if (unlikely(value == IXGBE_FAILED_READ_REG))
- ixgbe_check_remove(hw, reg);
- return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
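[Editor's note] The header change un-inlines ixgbe_read_reg() — its body moves to ixgbe_main.c later in this diff, and ixgbe_check_remove() becomes static there — while callers keep using the IXGBE_READ_REG() macro unchanged. A hedged caller-side sketch of the surprise-removal pattern this enables (the helper name is hypothetical; IXGBE_READ_REG, IXGBE_STATUS and IXGBE_FAILED_READ_REG are from this driver):

/* IXGBE_READ_REG() returns IXGBE_FAILED_READ_REG (all ones) once the
 * adapter has been surprise-removed, so register-heavy paths can bail
 * out early instead of spinning on dead MMIO. */
static bool ixgbe_device_present_sketch(struct ixgbe_hw *hw)
{
	return IXGBE_READ_REG(hw, IXGBE_STATUS) != IXGBE_FAILED_READ_REG;
}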
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e000131b..a689ee0d4bed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
for (i = 0; i < MAX_USER_PRIORITY; i++)
map[i] = IXGBE_RTRUP2TC_UP_MASK &
(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
- return;
}
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cbc..d3ba63f9ad37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
continue;
}
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3b0f30..3b932fe64ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
max_tc = prio_tc[i];
}
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d5a1e3db0774..90c370230e20 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -31,17 +31,17 @@
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
- * 1 WSP - Weighted Strict Priority
- */
+ * 1 WSP - Weighted Strict Priority
+ */
#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
- * 1 WRR - Weighted Round Robin
- */
+ * 1 WRR - Weighted Round Robin
+ */
#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
- * clear!
- */
+ * clear!
+ */
#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
/* Receive UP2TC mapping */
@@ -56,11 +56,11 @@
#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
- * buffers enable
- */
+ * buffers enable
+ */
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
- * (RSS) enable
- */
+ * (RSS) enable
+ */
/* RTRPCS Bit Masks */
#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
/* RTTPCS Bit Masks */
#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
- * 1 SP - Strict Priority
- */
+ * 1 SP - Strict Priority
+ */
#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RTTPCS_ARBD_SHIFT 22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1ef27f..5172b6b12c09 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
- u8 setting)
+ u8 setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
}
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
- u8 *setting)
+ u8 *setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 472b0f450bf9..5e2c1e35e517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
- if (adapter->ixgbe_dbg_adapter)
- debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+ debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
adapter->ixgbe_dbg_adapter = NULL;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6c55c14d082a..a452730a3278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
- IXGBE_PB_STATS_LEN + \
- IXGBE_QUEUE_STATS_LEN)
+ IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -161,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
- /* SFP type is needed for get_link_capabilities */
- if (hw->phy.media_type & (ixgbe_media_type_fiber |
- ixgbe_media_type_fiber_qsfp)) {
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- hw->phy.ops.identify_sfp(hw);
- }
-
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -303,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
}
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -368,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +443,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ struct ethtool_regs *regs, void *p)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
}
static int ixgbe_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +911,7 @@ err:
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 nvm_track_id;
@@ -940,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
@@ -1082,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct rtnl_link_stats64 temp;
@@ -1110,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
@@ -1180,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+ u8 *data)
{
char *p = (char *)data;
int i;
@@ -1357,8 +1350,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
- e_err(drv, "pattern test reg %04X failed: got "
- "0x%08X expected 0x%08X\n",
+ e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1374,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
ixgbe_write_reg(&adapter->hw, reg, write & mask);
val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err(drv, "set/check reg %04X test failed: got 0x%08X "
- "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
ixgbe_write_reg(&adapter->hw, reg, before);
return true;
@@ -1430,8 +1422,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
- e_err(drv, "failed STATUS register test got: 0x%08X "
- "expected: 0x%08X\n", after, value);
+ e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
@@ -1533,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
return -1;
}
} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
shared_int = false;
} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1563,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1587,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
- if (!(adapter->test_icr &mask)) {
+ if (!(adapter->test_icr & mask)) {
*data = 4;
break;
}
@@ -1602,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1964,7 +1956,7 @@ out:
}
static void ixgbe_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
@@ -1987,10 +1979,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
int i;
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].clear_to_send) {
- netdev_warn(netdev, "%s",
- "offline diagnostic is not "
- "supported when VFs are "
- "present\n");
+ netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
data[0] = 1;
data[1] = 1;
data[2] = 1;
@@ -2037,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
* loopback diagnostic. */
if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED)) {
- e_info(hw, "Skip MAC loopback diagnostic in VT "
- "mode\n");
+ e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
data[3] = 0;
goto skip_loopback;
}
@@ -2078,7 +2066,7 @@ skip_ol_tests:
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_hw *hw = &adapter->hw;
int retval = 0;
@@ -2094,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
}
static void ixgbe_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
}
static int ixgbe_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2222,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- e_info(probe, "rx-usecs value high enough "
- "to re-enable RSC\n");
+ e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
return true;
}
/* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2433,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2787,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
- e_warn(drv, "enabling UDP RSS: fragmented packets"
- " may arrive out of order to the stack above\n");
+ e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags2 = flags2;
@@ -3099,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
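[Editor's note] Two things worth calling out in the ethtool changes: the final hunk replaces the removed SET_ETHTOOL_OPS() macro with a direct netdev->ethtool_ops assignment, and the new /* fallthrough */ comments document that ixgbe_get_rss_hash_opts() accumulates hash fields by deliberately falling through — TCP picks up everything UDP does, and both fall into the common IP-only cases. A simplified sketch of that pattern (constants are from the real linux/ethtool.h UAPI; the switch shape mirrors the hunk but is not a quote of the driver):

#include <linux/ethtool.h>	/* TCP_V4_FLOW, RXH_* flags */

static u64 rss_fields_sketch(int flow_type, bool udp4_rss)
{
	u64 data = 0;

	switch (flow_type) {
	case TCP_V4_FLOW:
		data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough: TCP also hashes everything UDP does */
	case UDP_V4_FLOW:
		if (udp4_rss)
			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough: all IPv4 flows hash src/dst addresses */
	case SCTP_V4_FLOW:
		data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	}
	return data;
}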
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc786750d..0772b7730fce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
u8 up;
-#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2067d392cc3d..2d9451e39686 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
err = pci_enable_msi(adapter->pdev);
if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
+ "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+ err);
return;
}
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c047c3ef8d71..f5aa3311ea28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
ixgbe_service_event_schedule(adapter);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u32 value;
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns: the value read, or IXGBE_FAILED_READ_REG if the device is gone
+ *
+ * This function is used to read device registers. It detects surprise
+ * removal by checking, whenever a read returns all ones, whether the
+ * status register also reads all ones. It avoids touching the hardware
+ * entirely if a removal was previously detected, in which case it
+ * returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
u16 value;
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
* ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
* @adapter: driver data
*/
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ else
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+ ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+ return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+/* this function destroys the first RAR entry */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].queue = VMDQ_P(0);
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
- /* return error if we do not support writing to RAR table */
- if (!hw->mac.ops.set_rar)
- return -ENOMEM;
-
netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- VMDQ_P(0), IXGBE_RAH_AV);
+ ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--)
- hw->mac.ops.clear_rar(hw, rar_entries);
-
return count;
}
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
-
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
/* Only disable hardware filter vlans in promiscuous mode
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED)))
- ixgbe_vlan_filter_disable(adapter);
- else
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
}
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev);
+ count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
-
- if (adapter->num_vfs)
- ixgbe_restore_vf_multicasts(adapter);
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
*/
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
/* Calculate max LAN frame size */
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
if (!num_tc)
num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water > hw->fc.high_water[i])
- hw->fc.low_water = 0;
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
}
+
+ for (; i < MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
vmolr |= IXGBE_VMOLR_ROMPE;
hw->mac.ops.update_mc_addr_list(hw, dev);
}
- ixgbe_write_uc_addr_list(adapter->netdev);
+ ixgbe_write_uc_addr_list(adapter->netdev, pool);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 pool)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int entry;
-
- entry = hw->mac.num_rar_entries - pool;
- hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4521,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_qsfp_active_unknown:
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
+ /* ixgbe_phy_none is set when no SFP module is present */
+ case ixgbe_phy_none:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4742,7 +4879,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
int err;
+ u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr))
return;
@@ -4778,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ ixgbe_flush_sw_mac_table(adapter);
+ ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5517,6 +5661,17 @@ err_setup_tx:
return err;
}
+static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
+{
+ ixgbe_ptp_suspend(adapter);
+
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+}
+
/**
* ixgbe_close - Disables a network interface
* @netdev: network interface device structure
@@ -5534,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
-
ixgbe_release_hw_control(adapter);
return 0;
@@ -5608,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
rtnl_lock();
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_close_suspend(adapter);
rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
@@ -5945,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->state));
+ &(adapter->tx_ring[i]->state));
/* re-enable flow director interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
} else {
@@ -7172,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
- return 0;
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+ return ret > 0 ? 0 : ret;
}
static int
@@ -7783,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_do_ioctl = ixgbe_ioctl,
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
@@ -8187,6 +8335,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -8242,7 +8392,7 @@ skip_sriov:
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
+ part_str);
else
e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, part_str);
@@ -8304,8 +8454,8 @@ skip_sriov:
ixgbe_dbg_adapter_init(adapter);
- /* Need link setup for MNG FW, else wait for IXGBE_UP */
- if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
+ /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
+ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw,
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
@@ -8319,6 +8469,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->mac_table);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8392,6 +8543,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
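[Editor's note] The largest change in ixgbe_main.c replaces direct RAR register writes with a software MAC filter table (adapter->mac_table) plus add/del/sync helpers, so unicast filters survive resets and can be tracked per pool. A hedged usage sketch, mirroring the ixgbe_set_mac() hunk above — ixgbe_add_mac_filter() and ixgbe_del_mac_filter() are real functions from this diff, while the surrounding function is illustrative only:

static int example_swap_filter(struct ixgbe_adapter *adapter,
			       u8 *old_addr, u8 *new_addr, u16 pool)
{
	int ret;

	/* Drop the stale entry first so its RAR slot can be reused. */
	ixgbe_del_mac_filter(adapter, old_addr, pool);

	/* Add returns the RAR index used (>= 0), or -EINVAL for a zero
	 * address / -ENOMEM when the table is full. */
	ret = ixgbe_add_mac_filter(adapter, new_addr, pool);
	return ret < 0 ? ret : 0;
}

Both helpers mark touched entries IXGBE_MAC_STATE_MODIFIED and call ixgbe_sync_mac_table(), which writes only modified entries to the hardware RAR registers.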
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index f5c6af2b891b..1918e0abf734 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -223,7 +223,7 @@ out:
* received an ack to that message within delay * timeout period
**/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -407,7 +407,7 @@ out_no_write:
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a9b9ad69ed0e..a5cb755de3a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -54,11 +54,11 @@
* Message ACKs are the value OR'd with 0xF0000000
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
+ * this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
+ * this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
+ clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index a76af8e28a04..ff68b7a9deff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
+ ixgbe_get_phy_type_from_id(hw->phy.id);
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
- &phy_id_high);
+ &phy_id_high);
if (status == 0) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
- &phy_id_low);
+ &phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
+ u32 device_type, u16 *phy_data)
{
s32 status;
u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @phy_data: Data to write to the PHY register
**/
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+ u32 device_type, u16 phy_data)
{
s32 status;
u16 gssr;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
/*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the link capabilities by reading the AUTOC register.
*/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_ERR_LINK_SETUP;
u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*autoneg = true;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
+ &speed_ability);
if (status == 0) {
if (speed_ability & MDIO_SPEED_10G)
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* reset the PHY and poll for completion */
hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- (phy_data | MDIO_CTRL1_RESET));
+ (phy_data | MDIO_CTRL1_RESET));
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- &phy_data);
+ &phy_data);
if ((phy_data & MDIO_CTRL1_RESET) == 0)
break;
usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* Get init offsets */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto out;
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
- MDIO_MMD_PMAPMD, eword);
+ MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
phy_offset);
data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core0;
+ ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core1;
+ ixgbe_sfp_type_da_cu_core1;
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
+ ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
+ ixgbe_sfp_type_srlr_core1;
} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
if (status != 0)
goto err_read_i2c_eeprom;
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
* so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
+ u16 *list_offset,
+ u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data)
+ u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* a specified device address.
**/
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
s32 status = 0;
u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
* a specified device address.
**/
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
s32 status = 0;
u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
/* Check that the LASI temp alarm status was triggered */
hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
+ MDIO_MMD_PMAPMD, &phy_data);
if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 0bb047f751c2..54071ed17e3b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
+ u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
+ u32 device_type, u16 phy_data);
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+ ixgbe_link_speed *speed,
+ bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
+ ixgbe_link_speed *speed,
+ bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
+ u16 *list_offset,
+ u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data);
+ u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
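The EEPROM helpers prototyped here are thin wrappers: the byte-granular I2C ops take an explicit device address, while the *_eeprom_generic variants fix it to IXGBE_I2C_EEPROM_DEV_ADDR (see ixgbe_read_i2c_eeprom_generic above). A hedged sketch of reading one SFP EEPROM byte through the ops table; the helper name is illustrative, the offset constant comes from the identify hunk above:

/* Illustrative: one EEPROM byte via the ops table; equivalent to
 * read_i2c_byte(hw, offset, IXGBE_I2C_EEPROM_DEV_ADDR, byte)
 */
static s32 read_oui_byte1(struct ixgbe_hw *hw, u8 *byte)
{
	return hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE1,
					   byte);
}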
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 8902ae683457..68f87ecb8a76 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,7 +26,6 @@
*******************************************************************************/
#include "ixgbe.h"
-#include <linux/export.h>
#include <linux/ptp_classify.h>
/*
@@ -334,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * ixgbe_ptp_enable
+ * ixgbe_ptp_feature_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
@@ -342,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
*/
-static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -570,9 +569,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
}
/**
- * ixgbe_ptp_set_ts_config - control hardware time stamping
- * @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private ixgbe adapter structure
+ * @config: the hwtstamp configuration requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -590,25 +589,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* packets, regardless of the type specified in the register, only use V2
* Event mode. This more accurately tells the user what the hardware is going
 * to do anyway.
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
*/
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ struct hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -617,7 +616,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
@@ -641,7 +640,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
@@ -652,7 +651,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -671,7 +670,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
else
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
-
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -693,6 +691,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ *
+ * Set hardware to requested mode. If unsupported, return an error with no
+ * changes. Otherwise, store the mode for future reference.
+ */
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
/* save these settings for future reference */
memcpy(&adapter->tstamp_config, &config,
sizeof(adapter->tstamp_config));
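ixgbe_ptp_set_ts_config is the driver's SIOCSHWTSTAMP entry point, so the mode it stores is whatever user space negotiates through the standard hwtstamp ioctl. A minimal user-space sketch of that negotiation (the interface name and function name are placeholders; sock is any open socket):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_rx_tstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	/* the driver may widen cfg.rx_filter, as documented above */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}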
@@ -790,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
- * When the MAC resets, all timesync features are reset. This function should be
- * called to re-enable the PTP clock structure. It will re-init the timecounter
- * structure based on the kernel time as well as setup the cycle counter data.
+ * When the MAC resets, all the hardware bits for timesync are reset. This
+ * function is used to re-enable the device for PTP based on current settings.
+ * We do lose the current clock time, so just reset the cyclecounter to the
+ * system real clock time.
+ *
+ * This function will maintain hwtstamp_config settings, and resets the SDP
+ * output if it was enabled.
*/
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
{
@@ -804,8 +829,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
- /* Reset the saved tstamp_config */
- memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+ /* reset the hardware timestamping mode */
+ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
ixgbe_ptp_start_cyclecounter(adapter);
@@ -825,16 +850,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_init
+ * ixgbe_ptp_create_clock
* @adapter: the ixgbe private adapter structure
*
- * This function performs the required steps for enabling ptp
- * support. If ptp support has already been loaded it simply calls the
- * cyclecounter init routine and exits.
+ * This function performs setup of the user entry point function table and
+ * initializes the PTP clock device, which is used to access the clock-like
+ * features of the PTP core. It will be called by ixgbe_ptp_init, only if
+ * there isn't already a clock device (such as after a suspend/resume cycle,
+ * where the clock device wasn't destroyed).
*/
-void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+ return 0;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
@@ -851,7 +883,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
case ixgbe_mac_82599EB:
snprintf(adapter->ptp_caps.name,
@@ -867,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
default:
adapter->ptp_clock = NULL;
- return;
+ return -EOPNOTSUPP;
}
- spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
+ err = PTR_ERR(adapter->ptp_clock);
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
+ return err;
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ /* set default timestamp mode to disabled here. We do this in
+ * create_clock instead of init, because we don't want to override the
+ * previous settings during a resume cycle.
+ */
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_init
+ * @adapter: the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling PTP
+ * support. If PTP support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+ /* initialize the spin lock first since we can't control when a user
+ * will call the entry functions once we have initialized the clock
+ * device
+ */
+ spin_lock_init(&adapter->tmreg_lock);
+
+ /* obtain a PTP device, or re-use an existing device */
+ if (ixgbe_ptp_create_clock(adapter))
+ return;
+
+ /* we have a clock so we can initialize work now */
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
+
+ /* reset the PTP related hardware bits */
ixgbe_ptp_reset(adapter);
/* enter the IXGBE_PTP_RUNNING state */
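The ordering in ixgbe_ptp_init matters: the lock must be usable before ptp_clock_register() publishes the PHC, since the clock callbacks can run as soon as the device exists, and the create-once guard keeps a clock that survived suspend. A condensed sketch of that sequence, taken from the hunks above:

/* Condensed init ordering, locals as in ixgbe_ptp_init above */
spin_lock_init(&adapter->tmreg_lock);	/* 1: state used by PHC callbacks */
if (ixgbe_ptp_create_clock(adapter))	/* 2: no-op if clock survived resume */
	return;
INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
ixgbe_ptp_reset(adapter);		/* 3: re-arm hardware for current mode */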
@@ -894,28 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_stop - disable ptp device and stop the overflow check
- * @adapter: pointer to adapter struct
+ * ixgbe_ptp_suspend - stop PTP work items
+ * @adapter: pointer to adapter struct
*
- * this function stops the ptp support, and cancels the delayed work.
+ * this function suspends PTP activity and prevents more PTP work from being
+ * generated, but does not destroy the PTP clock device.
*/
-void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
{
/* Leave the IXGBE_PTP_RUNNING state. */
if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
return;
- /* stop the PPS signal */
- adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
- ixgbe_ptp_setup_sdp(adapter);
+ /* since this might be called in suspend, we don't clear the state,
+ * but simply reset the auxiliary PPS signal control register
+ */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
+ /* ensure that we cancel any pending PTP Tx work item in progress */
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * ixgbe_ptp_stop - close the PTP device
+ * @adapter: pointer to adapter struct
+ *
+ * completely destroys the PTP device; should only be called when the device is
+ * being fully closed.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+ /* first, suspend PTP activity */
+ ixgbe_ptp_suspend(adapter);
+ /* disable the PTP clock device */
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
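The suspend/stop split above means the PHC registration can outlive a power transition: suspend cancels work and quiesces the PPS output but keeps the clock device, while stop additionally unregisters it. A sketch of the assumed call sites (these wrappers are illustrative; the actual callers live in ixgbe_main.c, outside this hunk):

/* Assumed usage, not part of this hunk */
static int example_suspend(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_suspend(adapter);	/* stop work; clock device survives */
	return 0;
}

static void example_remove(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_stop(adapter);	/* suspend + ptp_clock_unregister() */
}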
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d396c99..16b3a1cd9db6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
return 0;
}
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (!entry->free)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
}
/* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
+ ixgbe_full_sync_mac_table(adapter);
}
+#endif
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
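The VMOLR changes above make multicast-hash acceptance a per-VF policy: ROMPE is set only while a VF actually has registered multicast hashes, and cleared otherwise. A condensed sketch of that rule, mirroring the restore hunk (locals as in ixgbe_restore_vf_multicasts):

/* Per-VF ROMPE policy introduced above */
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

if (vfinfo->num_vf_mc_hashes)
	vmolr |= IXGBE_VMOLR_ROMPE;	/* accept hash-matched multicast */
else
	vmolr &= ~IXGBE_VMOLR_ROMPE;
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);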
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
+ vmolr |= IXGBE_VMOLR_BAM;
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
u8 num_tcs = netdev_get_num_tc(adapter->netdev);
/* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- hw->mac.ops.clear_rar(hw, rar_entry);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
return 0;
}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ ixgbe_del_mac_filter(adapter,
+ entry->vf_macvlan, vf);
}
}
}
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, mac_addr, vf);
return 0;
}
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
adapter->vfinfo[vf].vlan_count--;
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
- }
+ }
out:
- return err;
+ return err;
}
static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
}
}
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (link_speed != 10000)
return -EINVAL;
+ if (min_tx_rate)
+ return -EINVAL;
+
/* rate limit cannot be less than 10 Mbps or greater than link speed */
- if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
return -EINVAL;
/* store values */
adapter->vf_rate_link_speed = link_speed;
- adapter->vfinfo[vf].tx_rate = tx_rate;
+ adapter->vfinfo[vf].tx_rate = max_tx_rate;
/* update hardware configuration */
ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
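The widened ndo_set_vf_bw() signature carries both bounds, but the hardware only enforces a maximum rate, so any nonzero minimum is rejected outright. A condensed sketch of the validation logic from the hunk above (locals as in ixgbe_ndo_set_vf_bw):

/* Two-rate validation as implemented above */
if (min_tx_rate)
	return -EINVAL;			/* no minimum-rate support */
if (max_tx_rate && (max_tx_rate <= 10 || max_tx_rate > link_speed))
	return -EINVAL;			/* 10 Mbps < rate <= link speed */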
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eaddfb2ed..32c26d586c01 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff2423f07..9a89f98b35f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MAX_EITR 0x00000FF8
#define IXGBE_MIN_EITR 8
#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
+ (0x012300 + (((_i) - 24) * 4)))
#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
#define IXGBE_EITR_LLI_MOD 0x00008000
#define IXGBE_EITR_CNT_WDIS 0x80000000
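Several register macros in this header pick between two MMIO ranges by index. As a worked example, IXGBE_EITR maps indices 0-23 into the low block and the rest into the high one (the helper name is illustrative):

/* IXGBE_EITR(0) -> 0x00820, IXGBE_EITR(23) -> 0x0087c,
 * IXGBE_EITR(24) -> 0x012300
 */
static unsigned int eitr_offset(unsigned int i)
{
	return (i <= 23) ? 0x00820 + i * 4 : 0x012300 + (i - 24) * 4;
}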
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D014 + (n-64)*0x40
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
(0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D00C + (n-64)*0x40
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
- /* 8 of these 0x03C00 - 0x03C1C */
+ /* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_RXCTRL 0x03000
#define IXGBE_DROPEN 0x03D04
#define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
+ (0x0A200 + ((_i) * 8)))
#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
+ (0x0A204 + ((_i) * 8)))
#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
/* Packet split receive type */
#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
- (0x08600 + ((_i) * 4)))
+ (0x08600 + ((_i) * 4)))
#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
- IXGBE_GCR_EXT_VT_MODE_64)
+ IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_LSC | \
- IXGBE_EIMS_TCP_TIMER | \
- IXGBE_EIMS_OTHER)
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXD_ERR_CE | \
- IXGBE_RXD_ERR_LE | \
- IXGBE_RXD_ERR_PE | \
- IXGBE_RXD_ERR_OSE | \
- IXGBE_RXD_ERR_USE)
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXDADV_ERR_CE | \
- IXGBE_RXDADV_ERR_LE | \
- IXGBE_RXDADV_ERR_PE | \
- IXGBE_RXDADV_ERR_OSE | \
- IXGBE_RXDADV_ERR_USE)
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_10GB_FULL)
#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
- IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
+ u32 *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
+ bool *);
/* Packet Buffer Manipulation */
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 188a5974b85c..40dd798e1290 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
}
/**
@@ -155,7 +155,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
udelay(50);
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_X540,
.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540,
+ &ixgbe_get_supported_physical_layer_X540,
.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1baecb60f065..d420f124633f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -135,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+ netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de2793b06305..75467f83772c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b7b8d74c22d9..b151a949f352 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -42,6 +42,7 @@
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
@@ -179,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
* Misc definitions.
*/
#define DEFAULT_RX_QUEUE_SIZE 128
-#define DEFAULT_TX_QUEUE_SIZE 256
+#define DEFAULT_TX_QUEUE_SIZE 512
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
/*
* RX/TX descriptors.
*/
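IS_TSO_HEADER exists because TSO headers live in one coherent allocation (tso_hdrs_dma) while payload buffers are streaming-mapped per descriptor; on reclaim, only the latter may be unmapped. A condensed sketch of the rule this macro enables (it appears verbatim in the txq_reclaim hunk below):

/* Reclaim rule: header descriptors point into the coherent tso_hdrs
 * region and must not be unmapped; payload buffers are unmapped.
 */
if (!IS_TSO_HEADER(txq, desc->buf_ptr))
	dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
			 desc->byte_cnt, DMA_TO_DEVICE);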
@@ -250,6 +259,7 @@ struct tx_desc {
#define GEN_TCP_UDP_CHECKSUM 0x00020000
#define UDP_FRAME 0x00010000
#define MAC_HDR_EXTRA_4_BYTES 0x00008000
+#define GEN_TCP_UDP_CHK_FULL 0x00000400
#define MAC_HDR_EXTRA_8_BYTES 0x00000200
#define TX_IHL_SHIFT 11
@@ -345,6 +355,12 @@ struct tx_queue {
int tx_curr_desc;
int tx_used_desc;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
+
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+
struct tx_desc *tx_desc_area;
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -491,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
if (netif_tx_queue_stopped(nq)) {
__netif_tx_lock(nq, smp_processor_id());
- if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+ if (txq->tx_desc_count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
__netif_tx_unlock(nq);
}
@@ -661,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+ return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+ u16 *l4i_chk, u32 *command, int length)
+{
+ int ret;
+ u32 cmd = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdr_len;
+ int tag_bytes;
+
+ BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_8021Q));
+
+ hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ tag_bytes = hdr_len - ETH_HLEN;
+
+ if (length - hdr_len > mp->shared->tx_csum_limit ||
+ unlikely(tag_bytes & ~12)) {
+ ret = skb_checksum_help(skb);
+ if (!ret)
+ goto no_csum;
+ return ret;
+ }
+
+ if (tag_bytes & 4)
+ cmd |= MAC_HDR_EXTRA_4_BYTES;
+ if (tag_bytes & 8)
+ cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+ cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+ GEN_IP_V4_CHECKSUM |
+ ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+ /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+ * it seems we don't need to pass the initial checksum. */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ cmd |= UDP_FRAME;
+ *l4i_chk = 0;
+ break;
+ case IPPROTO_TCP:
+ *l4i_chk = 0;
+ break;
+ default:
+ WARN(1, "protocol not supported");
+ }
+ } else {
+no_csum:
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
+ cmd |= 5 << TX_IHL_SHIFT;
+ }
+ *command = cmd;
+ return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+ struct sk_buff *skb, char *data, int length,
+ bool last_tcp, bool is_last)
+{
+ int tx_index;
+ u32 cmd_sts;
+ struct tx_desc *desc;
+
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+
+ desc->l4i_chk = 0;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
+
+ cmd_sts = BUFFER_OWNED_BY_DMA;
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+ /* last descriptor in SKB */
+ if (is_last)
+ cmd_sts |= TX_ENABLE_INTERRUPT;
+ }
+ desc->cmd_sts = cmd_sts;
+ return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tx_index;
+ struct tx_desc *desc;
+ int ret;
+ u32 cmd_csum = 0;
+ u16 l4i_chk = 0;
+
+ tx_index = txq->tx_curr_desc;
+ desc = &txq->tx_desc_area[tx_index];
+
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+ if (ret)
+ WARN(1, "failed to prepare checksum!");
+
+ /* Should we set this? Can't use the value from skb_tx_csum()
+ * as it's not the correct initial L4 checksum to use. */
+ desc->l4i_chk = 0;
+
+ desc->byte_cnt = hdr_len;
+ desc->buf_ptr = txq->tso_hdrs_dma +
+ txq->tx_curr_desc * TSO_HEADER_SIZE;
+ desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ GEN_CRC;
+
+ txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int total_len, data_left, ret;
+ int desc_count = 0;
+ struct tso_t tso;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ /* Count needed descriptors */
+ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+ netdev_dbg(dev, "not enough descriptors for TSO!\n");
+ return -EBUSY;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ txq_put_hdr_tso(skb, txq, data_left);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+ ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+ size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ __skb_queue_tail(&txq->tx_skb, skb);
+ skb_tx_timestamp(skb);
+
+ /* clear TX_END status */
+ mp->work_tx_end &= ~(1 << txq->index);
+
+ /* ensure all descriptors are written before poking hardware */
+ wmb();
+ txq_enable(txq);
+ txq->tx_desc_count += desc_count;
+ return 0;
+err_release:
+ /* TODO: Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ return ret;
+}
+
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -671,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
+ void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
+ addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -692,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
- this_frag, 0,
- skb_frag_size(this_frag),
- DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
+ desc->byte_cnt, DMA_TO_DEVICE);
}
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
-static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -712,54 +916,22 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
struct tx_desc *desc;
u32 cmd_sts;
u16 l4i_chk;
- int length;
+ int length, ret;
- cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+ cmd_sts = 0;
l4i_chk = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int hdr_len;
- int tag_bytes;
-
- BUG_ON(skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_8021Q));
-
- hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
- tag_bytes = hdr_len - ETH_HLEN;
- if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
- unlikely(tag_bytes & ~12)) {
- if (skb_checksum_help(skb) == 0)
- goto no_csum;
- dev_kfree_skb_any(skb);
- return 1;
- }
-
- if (tag_bytes & 4)
- cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
- if (tag_bytes & 8)
- cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-
- cmd_sts |= GEN_TCP_UDP_CHECKSUM |
- GEN_IP_V4_CHECKSUM |
- ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_UDP:
- cmd_sts |= UDP_FRAME;
- l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
- break;
- case IPPROTO_TCP:
- l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
- break;
- default:
- BUG();
- }
- } else {
-no_csum:
- /* Errata BTS #50, IHL must be 5 if no HW checksum */
- cmd_sts |= 5 << TX_IHL_SHIFT;
+ if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx queue full?!\n");
+ return -EBUSY;
}
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+ if (ret)
+ return ret;
+ cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -801,7 +973,7 @@ no_csum:
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- int length, queue;
+ int length, queue, ret;
struct tx_queue *txq;
struct netdev_queue *nq;
@@ -810,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
nq = netdev_get_tx_queue(dev, queue);
if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
- txq->tx_dropped++;
netdev_printk(KERN_DEBUG, dev,
"failed to linearize skb with tiny unaligned fragment\n");
return NETDEV_TX_BUSY;
}
- if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
- if (net_ratelimit())
- netdev_err(dev, "tx queue full?!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
length = skb->len;
- if (!txq_submit_skb(txq, skb)) {
- int entries_left;
-
+ if (skb_is_gso(skb))
+ ret = txq_submit_tso(txq, skb, dev);
+ else
+ ret = txq_submit_skb(txq, skb, dev);
+ if (!ret) {
txq->tx_bytes += length;
txq->tx_packets++;
- entries_left = txq->tx_ring_size - txq->tx_desc_count;
- if (entries_left < MAX_SKB_FRAGS + 1)
+ if (txq->tx_desc_count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ } else {
+ txq->tx_dropped++;
+ dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
@@ -907,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
mp->dev->stats.tx_errors++;
}
- if (cmd_sts & TX_FIRST_DESC) {
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr))
dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
- } else {
- dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
- }
-
dev_kfree_skb(skb);
}
@@ -1010,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
/* mii management interface *************************************************/
-static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+static void mv643xx_eth_adjust_link(struct net_device *dev)
{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
u32 autoneg_disable = FORCE_LINK_PASS |
DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ret = phy_ethtool_sset(mp->phy, cmd);
if (!ret)
- mv643xx_adjust_pscr(mp);
+ mv643xx_eth_adjust_link(dev);
return ret;
}
@@ -1456,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
return -EINVAL;
mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
- mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+ mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != er->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, er->tx_pending);
if (netif_running(dev)) {
mv643xx_eth_stop(dev);
@@ -1832,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_ring_size = mp->tx_ring_size;
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the free entries reaches
+ * the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
txq->tx_desc_count = 0;
txq->tx_curr_desc = 0;
txq->tx_used_desc = 0;
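With the new defaults the thresholds work out concretely. Assuming MAX_SKB_FRAGS = 17 (typical with 4 KiB pages; the value shifts with page size), the worst-case descriptor budget and the resulting stop/wake points for the default 512-entry ring are:

/* Worked numbers for the thresholds above, under the stated assumption */
#define EX_MAX_SKB_DESCS (100 * 2 + 17)			/* 217 */
int tx_stop_threshold = 512 - EX_MAX_SKB_DESCS;		/* 295 */
int tx_wake_threshold = tx_stop_threshold / 2;		/* 147 */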
@@ -1871,6 +2046,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return -ENOMEM;
+ }
skb_queue_head_init(&txq->tx_skb);
return 0;
@@ -1891,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ if (txq->tso_hdrs)
+ dma_free_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_dma);
}
@@ -2303,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ret = phy_mii_ioctl(mp->phy, ifr, cmd);
if (!ret)
- mv643xx_adjust_pscr(mp);
+ mv643xx_eth_adjust_link(dev);
return ret;
}
@@ -2678,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
struct mv643xx_eth_platform_data *pd)
{
struct net_device *dev = mp->dev;
+ unsigned int tx_ring_size;
if (is_valid_ether_addr(pd->mac_addr))
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2692,22 +2881,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->rxq_count = pd->rx_queue_count ? : 1;
- mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+ tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
if (pd->tx_queue_size)
- mp->tx_ring_size = pd->tx_queue_size;
+ tx_ring_size = pd->tx_queue_size;
+
+ mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != tx_ring_size)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, tx_ring_size);
+
mp->tx_desc_sram_addr = pd->tx_sram_addr;
mp->tx_desc_sram_size = pd->tx_sram_size;
mp->txq_count = pd->tx_queue_count ? : 1;
}
-static void mv643xx_eth_adjust_link(struct net_device *dev)
-{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
-
- mv643xx_adjust_pscr(mp);
-}
-
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -2889,7 +3078,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (err)
goto out;
- SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+ dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
init_pscr(mp, pd->speed, pd->duplex);
@@ -2921,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->vlan_features = dev->features;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced263a5e..fc2fb25343f4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
return -ENODEV;
}
- bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
- if (!bus) {
- dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ bus = devm_mdiobus_alloc_size(&pdev->dev,
+ sizeof(struct orion_mdio_dev));
+ if (!bus)
return -ENOMEM;
- }
bus->name = "orion_mdio_bus";
bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
- if (!bus->irq) {
- mdiobus_free(bus);
+ bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
+ if (!bus->irq)
return -ENOMEM;
- }
for (i = 0; i < PHY_MAX_ADDR; i++)
bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
out_mdio:
if (!IS_ERR(dev->clk))
clk_disable_unprepare(dev->clk);
- kfree(bus->irq);
- mdiobus_free(bus);
return ret;
}
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
- kfree(bus->irq);
- mdiobus_free(bus);
if (!IS_ERR(dev->clk))
clk_disable_unprepare(dev->clk);
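The probe conversion above leans on devm_* allocations being released automatically when probe fails or the device unbinds, which is why the explicit kfree()/mdiobus_free() calls in the error and remove paths simply disappear. A minimal probe sketch under that assumption (the function name is illustrative):

/* Minimal devm-style probe: no manual cleanup on any exit path */
static int example_probe(struct platform_device *pdev)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc_size(&pdev->dev,
				      sizeof(struct orion_mdio_dev));
	if (!bus)
		return -ENOMEM;
	bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR,
				      sizeof(int), GFP_KERNEL);
	if (!bus->irq)
		return -ENOMEM;	/* bus itself is devm-managed too */
	return 0;
}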
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8bf99e..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
+#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Napi polling weight */
-#define MVNETA_RX_POLL_WEIGHT 64
-
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
* supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
#define MVNETA_TX_MTU_MAX 0x3ffff
+/* TSO header size */
+#define TSO_HEADER_SIZE 128
+
/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128
/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
@@ -258,6 +264,10 @@
ETH_HLEN + ETH_FCS_LEN, \
MVNETA_CPU_D_CACHE_LINE_SIZE)
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_phys) && \
+ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
struct mvneta_pcpu_stats {
@@ -279,9 +289,6 @@ struct mvneta_port {
u32 cause_rx_tx;
struct napi_struct napi;
- /* Napi weight */
- int weight;
-
/* Core clock */
struct clk *clk;
u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
* descriptor ring
*/
int count;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
/* Array of transmitted skb */
struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
/* Index of the next TX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* DMA buffers for TSO headers */
+ char *tso_hdrs;
+
+ /* DMA address of TSO headers */
+ dma_addr_t tso_hdrs_phys;
};
struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
int next_desc_to_proc;
};
-static int rxq_number = 8;
+/* The hardware supports eight (8) rx queues, but we are only allowing
+ * the first one to be used. Therefore, let's just allocate one queue.
+ */
+static int rxq_number = 1;
static int txq_number = 8;
static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
mvneta_txq_inc_get(txq);
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
if (!skb)
continue;
-
- dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
txq->count -= tx_done;
if (netif_tx_queue_stopped(nq)) {
- if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+ if (txq->count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
}
}
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
return rx_done;
}
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+ struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = hdr_len;
+ tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command |= MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+ txq->txq_put_index * TSO_HEADER_SIZE;
+ mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+ struct sk_buff *skb, char *data, int size,
+ bool last_tcp, bool is_last)
+{
+ struct mvneta_tx_desc *tx_desc;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = size;
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ return -ENOMEM;
+ }
+
+ tx_desc->command = 0;
+ txq->tx_skb[txq->txq_put_index] = NULL;
+
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ tx_desc->command = MVNETA_TXD_L_DESC;
+
+ /* last descriptor in SKB */
+ if (is_last)
+ txq->tx_skb[txq->txq_put_index] = skb;
+ }
+ mvneta_txq_inc_put(txq);
+ return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+ struct mvneta_tx_queue *txq)
+{
+ int total_len, data_left;
+ int desc_count = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct tso_t tso;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int i;
+
+ /* Count needed descriptors */
+ if ((txq->count + tso_count_descs(skb)) >= txq->size)
+ return 0;
+
+ if (skb_headlen(skb) < hdr_len) {
+ pr_info("mvneta: dropping TSO skb, linear area smaller than MAC+IP+TCP headers\n");
+ return 0;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+ mvneta_tso_put_hdr(skb, pp, txq);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+
+ if (mvneta_tso_put_data(dev, txq, skb,
+ tso.data, size,
+ size == data_left,
+ total_len == 0))
+ goto err_release;
+ data_left -= size;
+
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ return desc_count;
+
+err_release:
+ /* Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ for (i = desc_count - 1; i >= 0; i--) {
+ struct mvneta_tx_desc *tx_desc = txq->descs + i;
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ }
+ return 0;
+}
+
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
struct mvneta_tx_queue *txq)
{
struct mvneta_tx_desc *tx_desc;
- int i;
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
goto error;
}
- if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-
txq->tx_skb[txq->txq_put_index] = skb;
-
- mvneta_txq_inc_put(txq);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
-
txq->tx_skb[txq->txq_put_index] = NULL;
- mvneta_txq_inc_put(txq);
}
+ mvneta_txq_inc_put(txq);
}
return 0;
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
- struct netdev_queue *nq;
int frags = 0;
u32 tx_cmd;
if (!netif_running(dev))
goto out;
+ if (skb_is_gso(skb)) {
+ frags = mvneta_tx_tso(skb, dev, txq);
+ goto out;
+ }
+
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_id);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
}
}
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
-
- if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
- netif_tx_stop_queue(nq);
-
out:
if (frags > 0) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+ txq->count += frags;
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+
+ if (txq->count >= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
{
int queue;
- /* free the skb's in the hal tx ring */
+ /* free the skb's in the tx ring */
for (queue = 0; queue < txq_number; queue++)
mvneta_txq_done_force(pp, &pp->txqs[queue]);
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
{
txq->size = pp->tx_ring_size;
+	/* A queue must always have room for at least one skb.
+	 * Therefore, stop the queue when the number of free entries
+	 * reaches the maximum number of descriptors per skb.
+	 */
+ txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
/* Allocate memory for TX descriptors */
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs, txq->descs_phys);
return -ENOMEM;
}
+
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_phys, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ kfree(txq->tx_skb);
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+ return -ENOMEM;
+ }
mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
return 0;
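The stop/wake split configured in this hunk gives the queue hysteresis: transmission stops while a worst-case skb might not fit, and only resumes once half of that headroom has been reclaimed, which avoids rapid stop/wake flapping. A hedged userspace sketch; EXAMPLE_MAX_SKB_DESCS is an assumed stand-in for MVNETA_MAX_SKB_DESCS, not the driver's real value:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_SKB_DESCS 42

static bool should_stop(unsigned int count, unsigned int size)
{
        return count >= size - EXAMPLE_MAX_SKB_DESCS; /* tx_stop_threshold */
}

static bool should_wake(unsigned int count, unsigned int size)
{
        /* tx_wake_threshold = tx_stop_threshold / 2 */
        return count <= (size - EXAMPLE_MAX_SKB_DESCS) / 2;
}

int main(void)
{
        unsigned int size = 532;

        printf("stop when count >= %u, wake when count <= %u\n",
               size - EXAMPLE_MAX_SKB_DESCS,
               (size - EXAMPLE_MAX_SKB_DESCS) / 2);
        printf("count 500 stops: %d, count 200 wakes: %d\n",
               should_stop(500, size), should_wake(200, size));
        return 0;
}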
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
{
kfree(txq->tx_skb);
+ if (txq->tso_hdrs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_phys);
if (txq->descs)
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return 0;
/* The interface is running, so we have to force a
- * reallocation of the RXQs
+ * reallocation of the queues
*/
mvneta_stop_dev(pp);
mvneta_cleanup_txqs(pp);
mvneta_cleanup_rxqs(pp);
- pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
- netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+ netdev_err(dev, "unable to setup rxqs after MTU change\n");
return ret;
}
- mvneta_setup_txqs(pp);
+ ret = mvneta_setup_txqs(pp);
+ if (ret) {
+ netdev_err(dev, "unable to setup txqs after MTU change\n");
+ return ret;
+ }
mvneta_start_dev(pp);
mvneta_port_up(pp);
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
struct mvneta_port *pp = netdev_priv(dev);
- u8 *mac = addr + 2;
- int i;
-
- if (netif_running(dev))
- return -EBUSY;
+ struct sockaddr *sockaddr = addr;
+ int ret;
+ ret = eth_prepare_mac_addr_change(dev, addr);
+ if (ret < 0)
+ return ret;
/* Remove previous address table entry */
mvneta_mac_addr_set(pp, dev->dev_addr, -1);
/* Set new addr in hw */
- mvneta_mac_addr_set(pp, mac, rxq_def);
-
- /* Set addr in the device */
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = mac[i];
+ mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+ eth_commit_mac_addr_change(dev, addr);
return 0;
}
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
int ret;
- mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
-
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return -EINVAL;
pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
ring->rx_pending : MVNETA_MAX_RXD;
- pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
- ring->tx_pending : MVNETA_MAX_TXD;
+
+ pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+ MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+ if (pp->tx_ring_size != ring->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ pp->tx_ring_size, ring->tx_pending);
if (netif_running(dev)) {
mvneta_stop(dev);
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
};
/* Initialize hw */
-static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
int queue;
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
/* Set port default values */
mvneta_defaults_set(pp);
- pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
- GFP_KERNEL);
+ pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+ GFP_KERNEL);
if (!pp->txqs)
return -ENOMEM;
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
}
- pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
- GFP_KERNEL);
- if (!pp->rxqs) {
- kfree(pp->txqs);
+ pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+ GFP_KERNEL);
+ if (!pp->rxqs)
return -ENOMEM;
- }
/* Create Rx descriptor rings */
for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
return 0;
}
-static void mvneta_deinit(struct mvneta_port *pp)
-{
- kfree(pp->txqs);
- kfree(pp->rxqs);
-}
-
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
- u32 phy_addr;
struct mvneta_port *pp;
struct net_device *dev;
const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
phy_node = of_parse_phandle(dn, "phy", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "no associated PHY\n");
- err = -ENODEV;
- goto err_free_irq;
+ if (!of_phy_is_fixed_link(dn)) {
+ dev_err(&pdev->dev, "no PHY specified\n");
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+
+ err = of_phy_register_fixed_link(dn);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot register fixed PHY\n");
+ goto err_free_irq;
+ }
+
+	/* In the case of a fixed PHY, the DT node associated
+	 * with the PHY is the Ethernet MAC DT node.
+	 */
+ phy_node = dn;
}
phy_mode = of_get_phy_mode(dn);
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &mvneta_netdev_ops;
- SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
pp = netdev_priv(dev);
-
- pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- err = mvneta_init(pp, phy_addr);
- if (err < 0) {
- dev_err(&pdev->dev, "can't init eth hal\n");
+ err = mvneta_init(&pdev->dev, pp);
+ if (err < 0)
goto err_free_stats;
- }
err = mvneta_port_power_up(pp, phy_mode);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_deinit;
+ goto err_free_stats;
}
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
mvneta_conf_mbus_windows(pp, dram_target_info);
- netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+ netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
- goto err_deinit;
+ goto err_free_stats;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
return 0;
-err_deinit:
- mvneta_deinit(pp);
err_free_stats:
free_percpu(pp->stats);
err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
- mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
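The ethtool ring-sizing change earlier in this file clamps the requested TX ring into [MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD] instead of only capping the upper bound, so the stop threshold always leaves usable room. A minimal sketch of that behavior; the two numeric bounds below are placeholders, not the driver's real constants:

#include <stdio.h>

/* Minimal rendering of the clamp_t() sizing in
 * mvneta_ethtool_set_ringparam(); bounds are assumed placeholders.
 */
static unsigned int clamp_ring(unsigned int req, unsigned int lo,
                               unsigned int hi)
{
        return req < lo ? lo : (req > hi ? hi : req);
}

int main(void)
{
        const unsigned int min_txd = 84, max_txd = 4096;
        unsigned int requests[] = { 16, 512, 16384 };
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("requested %u -> granted %u\n", requests[i],
                       clamp_ring(requests[i], min_txd, max_txd));
        return 0;
}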
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index b358c2f6f4bd..8f5aa7c62b18 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
dev->netdev_ops = &pxa168_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+ dev->ethtool_ops = &pxa168_ethtool_ops;
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b81106451a0a..69693384b58c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
SET_NETDEV_DEV(dev, &hw->pdev->dev);
dev->irq = hw->pdev->irq;
- SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+ dev->ethtool_ops = &sky2_ethtool_ops;
dev->watchdog_timeo = TX_WATCHDOG;
dev->netdev_ops = &sky2_netdev_ops[port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 29b616990e52..5d940a26055c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* First, verify that the master reports correct status */
if (comm_pending(dev)) {
- mlx4_warn(dev, "Communication channel is not idle."
- "my toggle is %d (cmd:0x%x)\n",
+ mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd);
return -EAGAIN;
}
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
ret = mlx4_status_to_errno(vhcr->status);
} else
- mlx4_err(dev, "failed execution of VHCR_POST command"
- "opcode 0x%x\n", op);
+ mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+ op);
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
goto out;
}
+ if (out_is_imm && !out_param) {
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
+ if (out_is_imm && !out_param) {
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
+ err = -EINVAL;
+ goto out;
+ }
+
init_completion(&context->done);
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
(slave & ~0x7f) | (size & 0xff)) {
- mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
- "master_addr:0x%llx slave_id:%d size:%d\n",
- slave_addr, master_addr, slave, size);
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
return -EINVAL;
}
@@ -1422,8 +1432,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) {
- mlx4_err(dev, "%s:Failed reading vhcr"
- "ret: 0x%x\n", __func__, ret);
+ mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+ __func__, ret);
kfree(vhcr);
return ret;
}
@@ -1474,9 +1484,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
- mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
- "checks for resource_id:%d\n", vhcr->op, slave,
- vhcr->in_modifier);
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+ vhcr->op, slave, vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status;
}
@@ -1515,8 +1524,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
- mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
- " error:%d, status %d\n",
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
@@ -1550,8 +1558,8 @@ out_status:
__func__);
else if (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
- mlx4_warn(dev, "Failed to generate command completion "
- "eqe for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+ slave);
}
out:
@@ -1590,8 +1598,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port);
- mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
- vp_admin->default_qos, vp_admin->link_state);
+ mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+ vp_admin->default_vlan, vp_admin->default_qos,
+ vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
@@ -1604,7 +1613,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
&admin_vlan_ix);
if (err) {
kfree(work);
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
@@ -1613,7 +1622,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
- mlx4_dbg((&(priv->dev)),
+ mlx4_dbg(&priv->dev,
"alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
@@ -1676,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port);
}
@@ -1692,12 +1701,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
if (0 > vp_oper->mac_idx) {
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No mac resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
vp_oper->state.mac, vp_oper->mac_idx, slave, port);
}
}
@@ -1748,8 +1757,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
if (toggle != slave_state[slave].comm_toggle) {
- mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
- "STATE COMPROMISIED ***\n", toggle, slave);
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+ toggle, slave);
goto reset_slave;
}
if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1776,8 +1785,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
/*command from slave in the middle of FLR*/
if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
- mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
- "in the middle of FLR\n", slave, cmd);
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
+ slave, cmd);
return;
}
@@ -1815,8 +1824,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
- mlx4_err(dev, "Failed processing vhcr for slave:%d,"
- " resetting slave.\n", slave);
+ mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+ slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
}
@@ -1833,8 +1842,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
- mlx4_warn(dev, "Slave is going down aborting command(%d)"
- " executing from slave:%d\n",
+ mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
cmd, slave);
return;
}
@@ -1897,10 +1905,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
if (toggle != slt) {
if (master->slave_state[slave].comm_toggle
!= slt) {
- printk(KERN_INFO "slave %d out of sync."
- " read toggle %d, state toggle %d. "
- "Resynching.\n", slave, slt,
- master->slave_state[slave].comm_toggle);
+ pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ slave, slt,
+ master->slave_state[slave].comm_toggle);
master->slave_state[slave].comm_toggle =
slt;
}
@@ -1913,8 +1920,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
}
if (reported && reported != served)
- mlx4_warn(dev, "Got command event with bitmask from %d slaves"
- " but %d were served\n",
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1970,7 +1976,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
ioremap(pci_resource_start(dev->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) {
- mlx4_err(dev, "Couldn't map communication vector.\n");
+ mlx4_err(dev, "Couldn't map communication vector\n");
goto err_vhcr;
}
@@ -2097,7 +2103,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.\n");
+ mlx4_err(dev, "Couldn't map command register\n");
return -ENOMEM;
}
}
@@ -2498,11 +2504,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
ivf->mac[5] = ((s_info->mac) & 0xff);
- ivf->vlan = s_info->default_vlan;
- ivf->qos = s_info->default_qos;
- ivf->tx_rate = s_info->tx_rate;
- ivf->spoofchk = s_info->spoofchk;
- ivf->linkstate = s_info->link_state;
+ ivf->vlan = s_info->default_vlan;
+ ivf->qos = s_info->default_qos;
+ ivf->max_tx_rate = s_info->tx_rate;
+ ivf->min_tx_rate = 0;
+ ivf->spoofchk = s_info->spoofchk;
+ ivf->linkstate = s_info->link_state;
return 0;
}
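The two hunks above add the same guard to both the polling and the waiting command paths: a command whose result comes back as an immediate value needs a non-NULL out_param before being posted, so the driver now fails early with -EINVAL instead of dereferencing NULL when the result arrives. A userspace sketch of the pattern (names and the returned value are illustrative):

#include <errno.h>
#include <stdio.h>

static int example_run_cmd(unsigned long long *out_param, int out_is_imm,
                           int op)
{
        if (out_is_imm && !out_param) {
                fprintf(stderr,
                        "response expected while output mailbox is NULL for command 0x%x\n",
                        op);
                return -EINVAL;
        }

        if (out_is_imm)
                *out_param = 0x1234; /* pretend the HCA returned a value */
        return 0;
}

int main(void)
{
        unsigned long long v = 0;

        printf("NULL out_param -> %d\n", example_run_cmd(NULL, 1, 0x21));
        printf("valid out_param -> %d (v=0x%llx)\n",
               example_run_cmd(&v, 1, 0x21), v);
        return 0;
}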
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index c90cde5b4aee..80f725228f5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq_affinity_change = false;
+
return 0;
err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d31bcad..4b2130760eed 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed Assigning an EQ to "
- "%s ,Falling back to legacy EQ's\n",
+ mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
}
@@ -164,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
+
+ err = irq_set_affinity_hint(cq->mcq.irq,
+ ring->affinity_mask);
+ if (err)
+ mlx4_warn(mdev, "Failed setting affinity hint\n");
+
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -180,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector)
+ if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (!cq->is_tx)
+ irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d33605fe7..fa1a069e14e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -378,8 +378,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
cmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
if (trans_type > 0 && trans_type <= 0xC) {
@@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
return priv->rx_ring_num;
}
-static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
return err;
}
-static int mlx4_en_set_rxfh_indir(struct net_device *dev,
- const u32 *ring_index)
+static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ const u8 *key)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
- en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
if (!qpn) {
- en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
- en_err(priv, "Fail to attach network rule at location %d.\n",
+ en_err(priv, "Fail to attach network rule at location %d\n",
cmd->fs.location);
goto out_free_list;
}
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int port_up;
+ int port_up = 0;
int err = 0;
if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+ if (dev->num_tc)
+ mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -1223,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
- .get_rxfh_indir = mlx4_en_get_rxfh_indir,
- .set_rxfh_indir = mlx4_en_set_rxfh_indir,
+ .get_rxfh = mlx4_en_get_rxfh,
+ .set_rxfh = mlx4_en_set_rxfh,
.get_channels = mlx4_en_get_channels,
.set_channels = mlx4_en_set_channels,
.get_ts_info = mlx4_en_get_ts_info,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4fe7e3a..f953c1d7eae6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
- mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
- mlx4_warn(mdev, "LSO not supported, please upgrade to later "
- "FW version to enable LSO\n");
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev);
if (err) {
- mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b1720c3d1..7d4fb7bf2593 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
case IPPROTO_TCP:
return MLX4_NET_TRANS_RULE_ID_TCP;
default:
- return -EPROTONOSUPPORT;
+ return MLX4_NET_TRANS_RULE_NUM;
}
};
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
- if (spec_tcp_udp.id < 0) {
+ if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
filter->ip_proto);
goto ignore;
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
- memcpy(priv->prev_mac, priv->dev->dev_addr,
- sizeof(priv->prev_mac));
} else
en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
+
return err;
}
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
mutex_lock(&mdev->state_lock);
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
err = mlx4_en_do_set_mac(priv);
mutex_unlock(&mdev->state_lock);
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
+static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
+ int numa_node = priv->mdev->dev->numa_node;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
+ ring->affinity_mask);
+ if (ret)
+ free_cpumask_var(ring->affinity_mask);
+
+ return ret;
+}
+
+static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+}
+
int mlx4_en_start_port(struct net_device *dev)
{
@@ -1567,17 +1588,25 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_cq_init_lock(cq);
+ err = mlx4_en_init_affinity_hint(priv, i);
+ if (err) {
+ en_err(priv, "Failed preparing IRQ affinity hint\n");
+ goto cq_err;
+ }
+
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
+ mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
for (j = 0; j < cq->size; j++)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
+ mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
@@ -1615,7 +1644,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
@@ -1715,8 +1744,10 @@ rss_err:
mac_err:
mlx4_en_put_qp(priv);
cq_err:
- while (rx_index--)
+ while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
+		mlx4_en_free_affinity_hint(priv, rx_index);
+ }
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
msleep(1);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
+
+ mlx4_en_free_affinity_hint(priv, i);
}
}
@@ -2539,7 +2572,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+ dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
* Set driver features
@@ -2594,8 +2627,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 87857a6463eb..d2d415732d99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
ring->actual_size,
GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
- en_err(priv, "Failed to allocate "
- "enough rx buffers\n");
+ en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
- en_warn(priv, "Only %d buffers allocated "
- "reducing ring size to %d",
+ en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
ring->actual_size, new_size);
goto reduce_rings;
}
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor "
- "syndrom:%d syndrom:%d\n",
- ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
- ((struct mlx4_err_cqe *) cqe)->syndrome);
+ en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
mlx4_en_cq_unlock_napi(cq);
/* If we used up all the quota - we're probably not done yet... */
- if (done == budget)
+ if (done == budget) {
INC_PERF_COUNTER(priv->pstats.napi_quota);
- else {
+ if (unlikely(cq->mcq.irq_affinity_change)) {
+ cq->mcq.irq_affinity_change = false;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return 0;
+ }
+ } else {
/* Done for now */
+ cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
- en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
- "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+ eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
" frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index bc0cc1eb214d..8be7483f8236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf = ring->wqres.buf.direct.buf;
- en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
- "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
- ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+ en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+ ring, ring->buf, ring->size, ring->buf_size,
+ (unsigned long long) ring->wqres.buf.direct.map);
ring->qpn = qpn;
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
- en_dbg(DRV, priv, "working without blueflame (%d)", err);
+ en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
ring->bf.uar = &mdev->priv_uar;
ring->bf.uar->map = mdev->uar_map;
ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done < budget) {
/* Done for now */
+ cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return done;
+ } else if (unlikely(cq->mcq.irq_affinity_change)) {
+ cq->mcq.irq_affinity_change = false;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return 0;
}
return budget;
}
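Both the RX and TX poll loops now treat a pending IRQ-affinity change as a reason to complete NAPI even when the budget was fully consumed, so the interrupt can fire again and land on the newly selected CPU. A compact sketch of that control flow, with comments standing in for napi_complete() and mlx4_en_arm_cq():

#include <stdbool.h>
#include <stdio.h>

static int example_poll(int done, int budget, bool *affinity_change)
{
        if (done < budget || *affinity_change) {
                *affinity_change = false;
                /* napi_complete(napi); mlx4_en_arm_cq(priv, cq); */
                return done < budget ? done : 0;
        }
        return budget; /* keep polling */
}

int main(void)
{
        bool change = true;

        printf("busy + affinity change -> %d\n",
               example_poll(64, 64, &change));
        change = false;
        printf("busy, no change -> %d\n", example_poll(64, 64, &change));
        printf("idle -> %d\n", example_poll(10, 64, &change));
        return 0;
}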
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b0fb79..d954ec1eac17 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,6 +53,11 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
+struct mlx4_irq_notify {
+ void *arg;
+ struct irq_affinity_notify notify;
+};
+
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
if (i != dev->caps.function &&
master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe))
- mlx4_warn(dev, "Failed to "
- " generate event "
- "for slave %d\n", i);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
- mlx4_warn(dev, "Failed to generate event "
- "for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ slave);
}
++slave_eq->cons;
}
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
- mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
- "No free EQE on slave events queue\n", slave);
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+ slave);
spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
}
break;
default:
- pr_err("%s: BUG!!! UNKNOWN state: "
- "slave:%d, port:%d\n", __func__, slave, port);
- goto out;
+ pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+ __func__, slave, port);
+ goto out;
}
ret = mlx4_get_slave_port_state(dev, slave, port);
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
for (i = 0 ; i < dev->num_slaves; i++) {
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
- mlx4_dbg(dev, "mlx4_handle_slave_flr: "
- "clean slave: %d\n", i);
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+ i);
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to notify FW on "
- "FLR done (slave:%d)\n", i);
+ mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+ i);
}
}
}
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.qp.qpn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "QP event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
+ mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
& 0xffffff,
&slave);
if (ret && ret != -ENOENT) {
- mlx4_warn(dev, "SRQ event %02x(%02x) "
- "on EQ %d at index %u: could"
- " not get slave id (%d)\n",
+ mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
- " event: %02x(%02x)\n", __func__,
- slave,
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event "
- "%02x(%02x) to slave:%d\n",
- __func__, eqe->type,
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
- mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
- " to slave: %d, port:%d\n",
+ mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "CQ event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
- eqe->type, eqe->subtype,
- eq->eqn, eq->cons_index, ret);
+ mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
break;
}
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Received comm channel event "
- "for non master device\n");
+ mlx4_warn(dev, "Received comm channel event for non master device\n");
break;
}
memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_FLR_EVENT:
flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Non-master function received"
- "FLR event\n");
+ mlx4_warn(dev, "Non-master function received FLR event\n");
break;
}
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
if (mlx4_is_master(dev))
for (i = 0; i < dev->num_slaves; i++) {
- mlx4_dbg(dev, "%s: Sending "
- "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
- " to slave: %d\n", __func__, i);
+ mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+ __func__, i);
if (i == dev->caps.function)
continue;
mlx4_slave_event(dev, i, eqe);
}
- mlx4_err(dev, "Temperature Threshold was reached! "
- "Threshold: %d celsius degrees; "
- "Current Temperature: %d\n",
- be16_to_cpu(eqe->event.warming.warning_threshold),
- be16_to_cpu(eqe->event.warming.current_temperature));
+ mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+ be16_to_cpu(eqe->event.warming.warning_threshold),
+ be16_to_cpu(eqe->event.warming.current_temperature));
} else
- mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
- "subtype %02x on EQ %d at index %u. owner=%x, "
- "nent=0x%x, slave=%x, ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
- "index %u. owner=%x, nent=0x%x, slave=%x, "
- "ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
- mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+ mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
return -ENOMEM;
}
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct mlx4_irq_notify *n = container_of(notify,
+ struct mlx4_irq_notify,
+ notify);
+ struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+ struct radix_tree_iter iter;
+ void **slot;
+
+ radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+ struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+ if (cq->irq == notify->irq)
+ cq->irq_affinity_change = true;
+ }
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+ struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+ notify.kref);
+ kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+ struct mlx4_dev *dev, int irq)
+{
+ struct mlx4_irq_notify *irq_notifier = NULL;
+ int err = 0;
+
+ irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+ if (!irq_notifier) {
+ mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+ irq);
+ return;
+ }
+
+ irq_notifier->notify.irq = irq;
+ irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+ irq_notifier->notify.release = mlx4_release_irq_notifier;
+ irq_notifier->arg = priv;
+ err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+ if (err) {
+ kfree(irq_notifier);
+ irq_notifier = NULL;
+ mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+ }
+}
+
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
continue;
			/* we don't want to break here */
}
+ mlx4_assign_irq_notifier(priv, dev,
+ priv->eq_table.eq[vec].irq);
+
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
}
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ irq_set_affinity_notifier(
+ priv->eq_table.eq[vec].irq,
+ NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 01e6dd61ee3c..688e1eabab29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -437,8 +437,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
- mlx4_err(dev, "phy_wqe_gid is "
- "enforced on this ib port\n");
+ mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
err = -EPROTONOSUPPORT;
goto out;
}
@@ -1070,10 +1069,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
*/
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) {
- mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
- MLX4_ICM_PAGE_SIZE,
- (unsigned long long) mlx4_icm_addr(&iter),
- mlx4_icm_size(&iter));
+ mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+ MLX4_ICM_PAGE_SIZE,
+ (unsigned long long) mlx4_icm_addr(&iter),
+ mlx4_icm_size(&iter));
err = -EINVAL;
goto out;
}
@@ -1109,14 +1108,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
switch (op) {
case MLX4_CMD_MAP_FA:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM_AUX:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
- tc, ts, (unsigned long long) virt - (ts << 10));
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
+ tc, ts, (unsigned long long) virt - (ts << 10));
break;
}
@@ -1202,14 +1201,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
- mlx4_err(dev, "Installed FW has unsupported "
- "command interface revision %d.\n",
+ mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff);
- mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+ mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 26169b3eaed8..5f42f6d6e4c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
-#define HCA_GLOBAL_CAP_MASK 0
-
#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
-MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
- "(0/1, default 0)");
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
- mlx4_err(dev, "Only same port types supported "
- "on this HCA, aborting.\n");
+ mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EINVAL;
}
}
@@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports; i++) {
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
- mlx4_err(dev, "Requested port type for port %d is not "
- "supported on this HCA\n", i + 1);
+ mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
+ i + 1);
return -EINVAL;
}
}
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
if (dev_cap->min_page_sz > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
dev_cap->min_page_sz, PAGE_SIZE);
return -ENODEV;
}
if (dev_cap->num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n",
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
dev_cap->num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev_cap->uar_size,
(unsigned long long) pci_resource_len(dev->pdev, 2));
return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
- dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
- mlx4_warn(dev, "Requested number of MACs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_macs);
}
if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
- mlx4_warn(dev, "Requested number of VLANs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_vlans);
}
}
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
(1 << dev->caps.log_num_macs) *
(1 << dev->caps.log_num_vlans) *
- (1 << dev->caps.log_num_prios) *
dev->caps.num_ports;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
return err;
}
- /*fail if the hca has an unknown capability */
- if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
- HCA_GLOBAL_CAP_MASK) {
+	/* Fail if the HCA has an unknown global capability;
+	 * at this time, global_caps should always be zeroed.
+	 */
+ if (hca_param.global_caps) {
mlx4_err(dev, "Unknown hca global capabilities\n");
return -ENOSYS;
}
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
err = mlx4_QUERY_FW(dev);
if (err)
- mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+ mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
page_size, PAGE_SIZE);
return -ENODEV;
}
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
- err);
+ mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+ err);
return err;
}
@@ -661,8 +650,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+ dev->caps.num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
@@ -682,8 +671,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; ++i) {
err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
- " port %d, aborting (%d).\n", i, err);
+ mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+ i, err);
goto err_mem;
}
dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
@@ -702,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long) pci_resource_len(dev->pdev, 2));
goto err_mem;
@@ -725,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+ mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
@@ -791,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port, -1);
if (err) {
- mlx4_err(dev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
goto out;
}
}
@@ -875,9 +863,7 @@ static ssize_t set_port_type(struct device *dev,
}
}
if (err) {
- mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
- "Set only 'eth' or 'ib' for both ports "
- "(should be the same)\n");
+ mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
goto out;
}
@@ -982,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_CLOSE_PORT(mdev, port);
err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
- mlx4_err(mdev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(mdev, "Failed to set port %d, aborting\n",
+ port);
goto err_set_port;
}
}
@@ -1002,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.fw_icm) {
- mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
if (err) {
- mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+ mlx4_err(dev, "MAP_FA command failed, aborting\n");
goto err_free;
}
err = mlx4_RUN_FW(dev);
if (err) {
- mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+ mlx4_err(dev, "RUN_FW command failed, aborting\n");
goto err_unmap_fa;
}
@@ -1098,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
if (err) {
- mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+ mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
return err;
}
- mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+ mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
(unsigned long long) icm_size >> 10,
(unsigned long long) aux_pages << 2);
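/*
 * Units in the debug line above: icm_size is in bytes, so ">> 10"
 * converts to KB; aux_pages counts 4KB pages, so "<< 2" converts
 * to KB as well.
 */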
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.aux_icm) {
- mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
if (err) {
- mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+ mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
goto err_free_aux;
}
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
if (err) {
- mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
goto err_unmap_aux;
}
@@ -1132,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
goto err_unmap_cmpt;
}
@@ -1153,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
- mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
goto err_unmap_eq;
}
@@ -1163,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mpts,
dev->caps.reserved_mrws, 1, 1);
if (err) {
- mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
goto err_unmap_mtt;
}
@@ -1174,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map QP context memory, aborting\n");
goto err_unmap_dmpt;
}
@@ -1185,7 +1171,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
goto err_unmap_qp;
}
@@ -1196,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
goto err_unmap_auxc;
}
@@ -1217,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_cqs,
dev->caps.reserved_cqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
goto err_unmap_rdmarc;
}
@@ -1227,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_srqs,
dev->caps.reserved_srqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
goto err_unmap_cq;
}
@@ -1245,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
goto err_unmap_srq;
}
@@ -1322,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
- mlx4_warn(dev, "Failed to close slave function.\n");
+ mlx4_warn(dev, "Failed to close slave function\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
@@ -1420,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) {
- mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ mlx4_warn(dev, "PF is not ready - Deferring probe\n");
return -EPROBE_DEFER;
}
@@ -1433,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
* NUM_OF_RESET_RETRIES times before leaving.*/
if (ret_from_reset) {
if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
- mlx4_warn(dev, "slave is currently in the "
- "middle of FLR. Deferring probe.\n");
+ mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EPROBE_DEFER;
} else
@@ -1448,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
MLX4_COMM_GET_IF_REV(slave_read)) {
- mlx4_err(dev, "slave driver version is not supported"
- " by the master\n");
+ mlx4_err(dev, "slave driver version is not supported by the master\n");
goto err;
}
@@ -1527,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
}
dev->oper_log_mgm_entry_size =
mlx4_log_num_mgm_entry_size > 0 ?
@@ -1536,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
}
- mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
- "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
mlx4_steering_mode_str(dev->caps.steering_mode),
dev->oper_log_mgm_entry_size,
mlx4_log_num_mgm_entry_size);
@@ -1571,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_FW(dev);
if (err) {
if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
+ mlx4_info(dev, "non-primary physical function, skipping\n");
else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_FW command failed, aborting\n");
return err;
}
err = mlx4_load_fw(dev);
if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
+ mlx4_err(dev, "Failed to start FW, aborting\n");
return err;
}
@@ -1591,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
goto err_stop_fw;
}
@@ -1632,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
/*
@@ -1643,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
memset(&init_hca, 0, sizeof(init_hca));
err = mlx4_QUERY_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
@@ -1656,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
if (!dev->caps.hca_core_clock) {
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_err(dev,
- "HCA frequency is 0. Timestamping is not supported.");
+ "HCA frequency is 0 - timestamping is not supported\n");
} else if (map_internal_clock(dev)) {
/*
* Map internal clock,
* in case of failure disable timestamping
*/
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+ mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
}
}
} else {
@@ -1690,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
- mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
goto unmap_bf;
}
@@ -1808,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "user access region table, aborting.\n");
- return err;
+ mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
if (err) {
- mlx4_err(dev, "Failed to allocate driver access region, "
- "aborting.\n");
+ mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
goto err_uar_table_free;
}
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
- mlx4_err(dev, "Couldn't map kernel access region, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
err = -ENOMEM;
goto err_uar_free;
}
err = mlx4_init_pd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "protection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
goto err_kar_unmap;
}
err = mlx4_init_xrcd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "reliable connection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
goto err_pd_table_free;
}
err = mlx4_init_mr_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "memory region table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
goto err_xrcd_table_free;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_mcg_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free;
}
}
err = mlx4_init_eq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "event queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
if (err) {
- mlx4_err(dev, "Failed to switch to event-driven "
- "firmware commands, aborting.\n");
+ mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
goto err_eq_table_free;
}
err = mlx4_NOP(dev);
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
- mlx4_warn(dev, "NOP command failed to generate MSI-X "
- "interrupt IRQ %d).\n",
+ mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
- mlx4_warn(dev, "Trying again without MSI-X.\n");
+ mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
- mlx4_err(dev, "NOP command failed to generate interrupt "
- "(IRQ %d), aborting.\n",
+ mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -1892,28 +1864,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_cq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "completion queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
goto err_cmd_poll;
}
err = mlx4_init_srq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "shared receive queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
goto err_cq_table_free;
}
err = mlx4_init_qp_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "queue pair table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
goto err_srq_table_free;
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
}
@@ -1923,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_get_port_ib_caps(dev, port,
&ib_port_default_caps);
if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing "
- "with caps = 0\n", port, err);
+ mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+ port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */
@@ -1935,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (i == mlx4_master_func_num(dev))
continue;
priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
- ib_port_default_caps;
+ ib_port_default_caps;
}
}
@@ -1948,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
+ port);
goto err_counters_table_free;
}
}
@@ -2024,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
kfree(entries);
goto no_msi;
} else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/* Working in legacy mode, all EQs shared */
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
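/*
 * nreq - 1 here because one of the granted MSI-X vectors is
 * presumably held back for the asynchronous event EQ, leaving the
 * remainder as completion vectors; comp_pool == 0 means no
 * dedicated per-ring vectors are left to hand out in this legacy
 * fallback.
 */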
@@ -2225,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
@@ -2273,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing DCS, aborting."
- "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+ dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
pci_dev_data, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing UAR, aborting\n");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -2295,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
- "consistent PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
@@ -2340,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (total_vfs) {
unsigned vfs_offset = 0;
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
- vfs_offset + nvfs[i] < extended_func_num(pdev);
+ vfs_offset + nvfs[i] < extended_func_num(pdev);
vfs_offset += nvfs[i], i++)
;
if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2366,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (err < 0)
goto err_free_dev;
else {
- mlx4_warn(dev, "Multiple PFs not yet supported."
- " Skipping PF.\n");
+ mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
err = -EINVAL;
goto err_free_dev;
}
@@ -2377,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
total_vfs);
dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = 0;
@@ -2386,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, total_vfs);
if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
atomic_dec(&pf_loading);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
+ MLX4_FLAG_MASTER;
dev->num_vfs = total_vfs;
sriov_initialized = 1;
}
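/*
 * The atomic_inc(&pf_loading) above pairs with the atomic_read()
 * check in mlx4_init_slave(): while the PF is still bringing up
 * SR-IOV, a probing VF sees pf_loading != 0 and returns
 * -EPROBE_DEFER ("PF is not ready - deferring probe").
 */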
@@ -2410,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
err = mlx4_reset(dev);
if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_rel_own;
}
}
@@ -2418,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
slave_start:
err = mlx4_cmd_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init command interface, aborting.\n");
+ mlx4_err(dev, "Failed to init command interface, aborting\n");
goto err_sriov;
}
@@ -2432,8 +2395,7 @@ slave_start:
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init slave mfunc"
- " interface, aborting.\n");
+ mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
goto err_cmd;
}
}
@@ -2465,8 +2427,7 @@ slave_start:
unsigned sum = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init master mfunc"
- "interface, aborting.\n");
+ mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
goto err_close;
}
if (sriov_initialized) {
@@ -2477,10 +2438,7 @@ slave_start:
if (ib_ports &&
(num_vfs_argc > 1 || probe_vfs_argc > 1)) {
mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs "
- "with IB port. Single port VFs syntax"
- " is only supported when all ports "
- "are configured as ethernet\n");
+ "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
goto err_close;
}
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2506,8 +2464,7 @@ slave_start:
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
err = -ENOSYS;
- mlx4_err(dev, "INTx is not supported in multi-function mode."
- " aborting.\n");
+ mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
@@ -2660,7 +2617,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
/* in SRIOV it is not allowed to unload the pf's
* driver while there are alive vf's */
if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
- printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ pr_warn("Removing PF when there are assigned VF's !!!\n");
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -2824,7 +2781,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
- .shutdown = mlx4_remove_one,
+ .shutdown = __mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
@@ -2832,33 +2789,36 @@ static struct pci_driver mlx4_driver = {
static int __init mlx4_verify_params(void)
{
if ((log_num_mac < 0) || (log_num_mac > 7)) {
- pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
+ pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
return -1;
}
if (log_num_vlan != 0)
- pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
- MLX4_LOG_NUM_VLANS);
+ pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
+ MLX4_LOG_NUM_VLANS);
+
+ if (use_prio != 0)
+ pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
- pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+ pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
+ log_mtts_per_seg);
return -1;
}
/* Check if module param for ports type has legal combination */
if (port_type_array[0] == false && port_type_array[1] == true) {
- printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
port_type_array[0] = true;
}
if (mlx4_log_num_mgm_entry_size != -1 &&
(mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
- pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
- "in legal range (-1 or %d..%d)\n",
- mlx4_log_num_mgm_entry_size,
- MLX4_MIN_MGM_LOG_ENTRY_SIZE,
- MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
return -1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4edf825..4c36def8e10f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != hash) {
- mlx4_err(dev, "Found zero MGID in AMGM.\n");
+ mlx4_err(dev, "Found zero MGID in AMGM\n");
err = -EINVAL;
}
return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
mlx4_err(dev, "%s", buf);
if (len >= BUF_SIZE)
- mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(dev, mailbox);
- return -EINVAL;
+ return ret;
}
size += ret;
}
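/*
 * Returning ret instead of a flattened -EINVAL lets callers of
 * mlx4_flow_attach() see the specific errno from
 * parse_trans_rule(); which codes that function can return is not
 * shown in this hunk.
 */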
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
if (ret == -ENOMEM)
mlx4_err_rule(dev,
- "mcg table is full. Fail to register network rule.\n",
+ "mcg table is full. Fail to register network rule\n",
rule);
else if (ret)
- mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+ mlx4_err_rule(dev, "Fail to register network rule\n", rule);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == dev->caps.num_qp_per_mgm) {
- mlx4_err(dev, "MGM at index %x is full.\n", index);
+ mlx4_err(dev, "MGM at index %x is full\n", index);
err = -ENOMEM;
goto out;
}
@@ -1042,7 +1042,7 @@ out:
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "Got AMGM index %d < %d",
+ mlx4_warn(dev, "Got AMGM index %d < %d\n",
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (amgm_index) {
if (amgm_index < dev->caps.num_mgms)
- mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+ mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+ mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7a0665beebb1..1d8af7336807 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -221,18 +221,19 @@ extern int mlx4_debug_level;
#define mlx4_debug_level (0)
#endif /* CONFIG_MLX4_DEBUG */
-#define mlx4_dbg(mdev, format, arg...) \
+#define mlx4_dbg(mdev, format, ...) \
do { \
if (mlx4_debug_level) \
- dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+ dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
+ ##__VA_ARGS__); \
} while (0)
-#define mlx4_err(mdev, format, arg...) \
- dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
- dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...) \
+ dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
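/*
 * A hypothetical caller illustrating what the variadic rewrite
 * above preserves: with ##__VA_ARGS__ (the C99 spelling of the GNU
 * "arg..." form), the ## swallows the trailing comma when no
 * arguments follow the format, so both of these expand correctly:
 *
 *	mlx4_err(mdev, "plain message\n");
 *	mlx4_err(mdev, "with arg: %d\n", err);
 *
 * Parenthesizing (mdev) additionally keeps lower-precedence
 * argument expressions grouped, e.g. mlx4_err(*pmdev, ...) expands
 * to &(*pmdev)->pdev->dev rather than the mis-parsed
 * &*pmdev->pdev->dev.
 */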
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe3e80..0e15295bedd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
int hwtstamp_rx_filter;
+ cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
@@ -830,26 +831,26 @@ __printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...);
-#define en_dbg(mlevel, priv, format, arg...) \
-do { \
- if (NETIF_MSG_##mlevel & priv->msg_enable) \
- en_print(KERN_DEBUG, priv, format, ##arg); \
+#define en_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0)
-#define en_warn(priv, format, arg...) \
- en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...) \
- en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...) \
- en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...) \
- pr_err("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...) \
- pr_info("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- pr_warning("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...) \
+ en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...) \
+ en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...) \
+ en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...) \
+ pr_err(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ pr_info(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ pr_warn(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 4c71dafad217..2839abb878a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to free mtt range at:"
- "%d order:%d\n", offset, order);
+ mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+ offset, order);
return;
}
__mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err) {
- mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
- mlx4_warn(dev, "MR has MWs bound to it.\n");
+ mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+ err);
return err;
}
@@ -774,7 +774,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
- mlx4_warn(dev, "MTT table of order %u is too small.\n",
+ mlx4_warn(dev, "MTT table of order %u is too small\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
@@ -955,8 +955,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
- printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
- " failed (%d)\n", err);
+ pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
return;
}
@@ -965,8 +964,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
(dev->caps.num_mpts - 1));
mlx4_free_cmd_mailbox(dev, mailbox);
if (err) {
- printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
- err);
+ pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
return;
}
fmr->mr.enabled = MLX4_MPT_EN_SW;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 5ec6f203c6e6..7ab97174886d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -254,8 +254,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (validate_index(dev, table, index))
goto out;
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify mac table\n", index);
+ mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+ index);
goto out;
}
@@ -453,9 +453,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
}
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have %d more references for index %d,"
- "no need to modify vlan table\n", table->refs[index],
- index);
+ mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+ table->refs[index], index);
goto out;
}
table->entries[index] = 0;
@@ -796,8 +795,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
sizeof(gid_entry_tbl->raw))) {
/* found duplicate */
- mlx4_warn(dev, "requested gid entry for slave:%d "
- "is a duplicate of gid at index %d\n",
+ mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
slave, i);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc2a1ec..14089d9e1667 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
}
if (total_size > dev_cap->max_icm_sz) {
- mlx4_err(dev, "Profile requires 0x%llx bytes; "
- "won't fit in 0x%llx bytes of context memory.\n",
- (unsigned long long) total_size,
- (unsigned long long) dev_cap->max_icm_sz);
+ mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+ (unsigned long long) total_size,
+ (unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
- mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
- "size 0x%10llx\n",
- i, res_name[profile[i].type], profile[i].log_num,
+ mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+ i, res_name[profile[i].type],
+ profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 40af61947925..0dc31d85fc3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) {
- mlx4_warn(dev, "Failed to release qp range"
- " base:%d cnt:%d\n", base_qpn, cnt);
+ mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+ base_qpn, cnt);
}
} else
__mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -612,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
- mlx4_err(dev, "Failed to bring QP to state: "
- "%d with error: %d\n",
+ mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
states[i + 1], err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b5093d8b1..ea1c6d092145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
goto out;
}
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
continue;
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
goto out;
}
}
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+ mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
goto out;
}
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (vendor == 0xffff) {
err = -ENODEV;
- mlx4_err(dev, "PCI device did not come back after reset, "
- "aborting.\n");
+ mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
goto out;
}
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Device Control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Link control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
goto out;
}
}
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA reg %x, "
- "aborting.\n", i);
+ mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+ i);
goto out;
}
}
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA COMMAND, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2ba3b7623960..0efc1368e5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -279,7 +279,7 @@ enum qp_transition {
};
/* For Debug uses */
-static const char *ResourceType(enum mlx4_resource rt)
+static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
@@ -307,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int err = -EINVAL;
int allocated, free, reserved, guaranteed, from_free;
+ int from_rsvd;
if (slave > dev->num_vfs)
return -EINVAL;
@@ -321,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];
- if (allocated + count > res_alloc->quota[slave])
+ if (allocated + count > res_alloc->quota[slave]) {
+ mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
+ slave, port, resource_str(res_type), count,
+ allocated, res_alloc->quota[slave]);
goto out;
+ }
if (allocated + count <= guaranteed) {
err = 0;
+ from_rsvd = count;
} else {
/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
@@ -333,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
else
from_free = count;
- if (free - from_free > reserved)
+ from_rsvd = count - from_free;
+
+ if (free - from_free >= reserved)
err = 0;
+ else
+ mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
+ slave, port, resource_str(res_type), free,
+ from_free, reserved);
}
if (!err) {
@@ -342,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
+ res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
} else {
res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
+ res_alloc->res_reserved -= from_rsvd;
}
}
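/*
 * A worked example of the grant-side accounting added above
 * (numbers hypothetical): quota 10, guaranteed 4, allocated 3,
 * count 3. Since allocated + count > guaranteed, from_free =
 * count - (guaranteed - allocated) = 2 comes from the shared free
 * pool and from_rsvd = count - from_free = 1 from the reserved
 * portion, so res_reserved (or res_port_rsvd) shrinks by exactly
 * the guaranteed share consumed.
 */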
@@ -360,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int allocated, guaranteed, from_rsvd;
if (slave > dev->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
+
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated - count >= guaranteed) {
+ from_rsvd = 0;
+ } else {
+ /* portion may need to be returned to reserved area */
+ if (allocated - guaranteed > 0)
+ from_rsvd = count - (allocated - guaranteed);
+ else
+ from_rsvd = count;
+ }
+
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
+ res_alloc->res_port_rsvd[port - 1] += from_rsvd;
} else {
res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
+ res_alloc->res_reserved += from_rsvd;
}
spin_unlock(&res_alloc->alloc_lock);
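/*
 * Release mirrors that example: with guaranteed 4, allocated 6 and
 * count 3, allocated - count = 3 dips below guaranteed, so
 * from_rsvd = count - (allocated - guaranteed) = 1 unit returns to
 * the reserved pool and the other 2 to the shared free pool,
 * keeping both counters symmetric with the grant path.
 */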
@@ -963,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
ret = alloc_srq_tr(id);
break;
case RES_MAC:
- printk(KERN_ERR "implementation missing\n");
+ pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
ret = alloc_counter_tr(id);
@@ -1057,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
{
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
- printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
- __func__, __LINE__,
- mtt_states_str(res->com.state),
- atomic_read(&res->ref_count));
+ pr_devel("%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_MTT_ALLOCATED)
return -EPERM;
@@ -3897,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
}
if (!be_mac) {
- pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+ pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
@@ -3994,7 +4027,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
- pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -4012,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
- pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+ pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put;
@@ -4021,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
- pr_err("Corrupted mailbox.\n");
+ pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put;
}
@@ -4035,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
- mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ mlx4_err(dev, "Fail to add flow steering resources\n");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4073,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
- mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
@@ -4151,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (print)
mlx4_dbg(dev,
"%s id 0x%llx is busy\n",
- ResourceType(type),
+ resource_str(type),
r->res_id);
++busy;
} else {
@@ -4202,8 +4235,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_QP);
if (err)
- mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
- "for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4241,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_qps: failed"
- " to move slave %d qpn %d to"
- " reset\n", slave,
- qp->local_qpn);
+ mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+ slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
@@ -4278,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
- mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4309,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_srqs: failed"
- " to move slave %d srq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
@@ -4346,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_CQ);
if (err)
- mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4377,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_cqs: failed"
- " to move slave %d cq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
@@ -4411,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MPT);
if (err)
- mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4447,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_mrs: failed"
- " to move slave %d mpt %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
@@ -4481,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MTT);
if (err)
- mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4584,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_EQ);
if (err)
- mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4617,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_eqs: failed"
- " to move slave %d eqs %d to"
- " SW ownership\n", slave, eqn);
+ mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+ slave, eqn);
mlx4_free_cmd_mailbox(dev, mailbox);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
@@ -4648,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
- mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4679,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
- mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4825,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
- mlx4_info(dev, "UPDATE_QP failed for slave %d, "
- "port %d, qpn %d (%d)\n",
- work->slave, port, qp->local_qpn,
- err);
+ mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+ work->slave, port, qp->local_qpn, err);
errors++;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fbcd0ad..87d1b018a9c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
}
- mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
- deliv_status_to_str(ent->status), ent->status);
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b286b2c..7f39ebcd6ad0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
*/
rmb();
- mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+ mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+ eq->eqn, eqe_type_str(eqe->type));
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
- mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
}
break;
default:
- mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+ mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+ eqe->type, eq->eqn);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f70051..ee24f132e319 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
return err;
}
}
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev,
- "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+ "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
- "Can't set consistent PCI DMA mask, aborting.\n");
+ "Can't set consistent PCI DMA mask, aborting\n");
return err;
}
}
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
return -ENODEV;
}
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
}
err = request_bar(pdev);
if (err) {
- dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+ dev_err(&pdev->dev, "error requesting BARs, aborting\n");
goto err_disable;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1ae1b0..f0c9f9a7a361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...) \
+ pr_debug("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, arg...) \
-do { \
- if ((mask) & mlx5_core_debug_mask) \
- pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
- __func__, __LINE__, current->pid, ##arg); \
+#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...) \
+ pr_err("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...) \
+ pr_warn("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
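/*
 * Note the dbg_mask variant above now expands through
 * mlx5_core_dbg() itself, so the "%s:%s:%d:(pid %d): " prefix is
 * written in exactly one place instead of being duplicated per
 * macro.
 */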
enum {
MLX5_CMD_DATA, /* print command payload only */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ac52a0fe2d3a..ba0401d4af50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
}
if (err) {
- mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+ mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err;
}
@@ -195,7 +195,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
}
if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "create_psv bad status %d\n",
+ out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
@@ -224,7 +225,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
}
if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "destroy_psv bad status %d\n",
+ out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a82bc3..c2a953ef0e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+ func_id, npages, err);
goto out_alloc;
}
dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
goto out_alloc;
}
}
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed recliaming pages\n");
+ mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
err = give_pages(dev, req->func_id, req->npages, 1);
if (err)
- mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
- "reclaim" : "give", err);
+ mlx5_core_warn(dev, "%s fail %d\n",
+ req->npages < 0 ? "reclaim" : "give", err);
kfree(req);
}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
optimal_reclaimed_pages(),
&nclaimed);
if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+ mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+ err);
return err;
}
if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 510576213dd0..8145b4668229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "ret %d", err);
+ mlx5_core_warn(dev, "ret %d\n", err);
return err;
}
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = radix_tree_insert(&table->tree, qp->qpn, qp);
spin_unlock_irq(&table->lock);
if (err) {
- mlx5_core_warn(dev, "err %d", err);
+ mlx5_core_warn(dev, "err %d\n", err);
goto err_cmd;
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 16435b3cfa9f..6c7c78baedca 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
if (ksp->phyiface_regs && ksp->link_irq == -1) {
ks8695_init_switch(ksp);
ksp->dtype = KS8695_DTYPE_LAN;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_ethtool_ops;
} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
ks8695_init_wan_phy(ksp);
ksp->dtype = KS8695_DTYPE_WAN;
- SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
} else {
/* No initialisation since HPNA does not have a PHY */
ksp->dtype = KS8695_DTYPE_HPNA;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_ethtool_ops;
}
/* And bring up the net_device with the net core */
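SET_ETHTOOL_OPS() was nothing more than a wrapper around this assignment, so open-coding it, here and in the many drivers that follow, is behavior-neutral; the macro was removed from <linux/ethtool.h> once all callers were converted. For reference, the old helper was simply:

    #define SET_ETHTOOL_OPS(netdev, ops)  ((netdev)->ethtool_ops = (ops))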
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e0c92e0e5e1d..66d4ab703f45 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -26,6 +26,8 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "ks8851.h"
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
* @vdd_reg: Optional regulator supplying the chip
+ * @vdd_io: Optional digital power supply for IO
+ * @gpio: Optional reset_n gpio
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
+ struct regulator *vdd_io;
+ int gpio;
};
static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
struct ks8851_net *ks;
int ret;
unsigned cider;
+ int gpio;
ndev = alloc_etherdev(sizeof(struct ks8851_net));
if (!ndev)
@@ -1417,20 +1424,53 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
- ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
- if (IS_ERR(ks->vdd_reg)) {
- ret = PTR_ERR(ks->vdd_reg);
- if (ret == -EPROBE_DEFER)
- goto err_reg;
- } else {
- ret = regulator_enable(ks->vdd_reg);
+ gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
+ 0, NULL);
+ if (gpio == -EPROBE_DEFER) {
+ ret = gpio;
+ goto err_gpio;
+ }
+
+ ks->gpio = gpio;
+ if (gpio_is_valid(gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, gpio,
+ GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
if (ret) {
- dev_err(&spi->dev, "regulator enable fail: %d\n",
- ret);
- goto err_reg_en;
+ dev_err(&spi->dev, "reset gpio request failed\n");
+ goto err_gpio;
}
}
+ ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(ks->vdd_io)) {
+ ret = PTR_ERR(ks->vdd_io);
+ goto err_reg_io;
+ }
+
+ ret = regulator_enable(ks->vdd_io);
+ if (ret) {
+ dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
+ ret);
+ goto err_reg_io;
+ }
+
+ ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ goto err_reg;
+ }
+
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
+ ret);
+ goto err_reg;
+ }
+
+ if (gpio_is_valid(gpio)) {
+ usleep_range(10000, 11000);
+ gpio_set_value(gpio, 1);
+ }
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1471,7 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
skb_queue_head_init(&ks->txq);
- SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+ ndev->ethtool_ops = &ks8851_ethtool_ops;
SET_NETDEV_DEV(ndev, &spi->dev);
spi_set_drvdata(spi, ks);
@@ -1527,13 +1567,14 @@ err_netdev:
free_irq(ndev->irq, ks);
err_irq:
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, 0);
err_id:
- if (!IS_ERR(ks->vdd_reg))
- regulator_disable(ks->vdd_reg);
-err_reg_en:
- if (!IS_ERR(ks->vdd_reg))
- regulator_put(ks->vdd_reg);
+ regulator_disable(ks->vdd_reg);
err_reg:
+ regulator_disable(ks->vdd_io);
+err_reg_io:
+err_gpio:
free_netdev(ndev);
return ret;
}
@@ -1547,18 +1588,24 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
- if (!IS_ERR(priv->vdd_reg)) {
- regulator_disable(priv->vdd_reg);
- regulator_put(priv->vdd_reg);
- }
+ if (gpio_is_valid(priv->gpio))
+ gpio_set_value(priv->gpio, 0);
+ regulator_disable(priv->vdd_reg);
+ regulator_disable(priv->vdd_io);
free_netdev(priv->netdev);
return 0;
}
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851" },
+ { }
+};
+
static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
+ .of_match_table = ks8851_match_table,
.owner = THIS_MODULE,
.pm = &ks8851_pm_ops,
},
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 14ac0e2bc09f..064a48d0c368 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
* Only reset the hardware if time between calls is long
* enough.
*/
- if (jiffies - last_reset <= dev->watchdog_timeo)
+ if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
hw_priv = NULL;
}
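Comparisons written as `jiffies > stamp + delay` (as the vxge and qlcnic hunks below had) misread the clock once jiffies wraps; the time_*() helpers from <linux/jiffies.h> compare via signed subtraction and survive the wrap, which is why this series converts even the already-safe subtraction form here for consistency. The pattern:

    unsigned long deadline = last_reset + dev->watchdog_timeo;

    if (time_before_eq(jiffies, deadline))  /* wrap-safe "not yet expired" */
            hw_priv = NULL;                 /* too soon to reset again */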
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev = alloc_etherdev(sizeof(struct dev_priv));
if (!dev)
goto pcidev_init_reg_err;
+ SET_NETDEV_DEV(dev, &pdev->dev);
info->netdev[i] = dev;
priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
if (register_netdev(dev))
goto pcidev_init_reg_err;
port_set_power_saving(port, true);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index c7b40aa21f22..b1b5f66b8b69 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
dev->irq = spi->irq;
dev->netdev_ops = &enc28j60_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
+ dev->ethtool_ops = &enc28j60_ethtool_ops;
enc28j60_lowpower(priv, true);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 130f6b204efa..f3d5d79f1cd1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
- SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+ netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 64ec2a437f46..291fba8b9f07 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &natsemi_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
if (mtu)
dev->mtu = mtu;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index dbccf1de49ec..19bb8244b9e3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
ndev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ops);
+ ndev->ethtool_ops = &ops;
ndev->watchdog_timeo = 5 * HZ;
pci_set_drvdata(pci_dev, ndev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff322c4..be587647c706 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
netif_tx_start_all_queues(sp->dev);
}
-static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
- if (!sp->config.multiq)
- sp->mac_control.fifos[fifo_no].queue_state =
- FIFO_QUEUE_START;
-
- netif_tx_start_all_queues(sp->dev);
-}
-
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
@@ -5369,8 +5360,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, -1);
- info->duplex = -1;
+ ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
+ info->duplex = DUPLEX_UNKNOWN;
}
info->autoneg = AUTONEG_DISABLE;
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Driver entry points */
dev->netdev_ops = &s2io_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
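SPEED_UNKNOWN is -1 and DUPLEX_UNKNOWN is 0xff in <uapi/linux/ethtool.h>, which is exactly what the old -1 produced after being stored into the u32 speed and u8 duplex fields, so this hunk (and the matching ones in vxge, forcedeth and pch_gbe below) changes readability, not behavior. The reporting shape:

    if (netif_carrier_ok(dev)) {
            ethtool_cmd_speed_set(info, SPEED_10000);
            info->duplex = DUPLEX_FULL;
    } else {
            ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
            info->duplex = DUPLEX_UNKNOWN;
    }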
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 089b713b9f7b..2bbd01fcb9b0 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -120,7 +120,6 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
u32 i = 0;
- enum vxge_hw_status ret = VXGE_HW_FAIL;
udelay(10);
@@ -139,7 +138,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
mdelay(1);
} while (++i <= max_millis);
- return ret;
+ return VXGE_HW_FAIL;
}
static inline enum vxge_hw_status
@@ -1682,12 +1681,10 @@ enum vxge_hw_status vxge_hw_driver_stats_get(
struct __vxge_hw_device *hldev,
struct vxge_hw_device_stats_sw_info *sw_stats)
{
- enum vxge_hw_status status = VXGE_HW_OK;
-
memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
sizeof(struct vxge_hw_device_stats_sw_info));
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -3228,7 +3225,6 @@ enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
int i = 0, j = 0;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -3241,7 +3237,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
return VXGE_HW_FAIL;
}
}
- return status;
+ return VXGE_HW_OK;
}
/*
* vxge_hw_mgmt_reg_Write - Write Titan register.
@@ -3979,7 +3975,6 @@ __vxge_hw_vpath_mgmt_read(
{
u32 i, mtu = 0, max_pyld = 0;
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
@@ -4009,7 +4004,7 @@ __vxge_hw_vpath_mgmt_read(
else
VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4039,14 +4034,13 @@ static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg0);
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4227,7 +4221,6 @@ static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vp_config *vp_config;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4283,7 +4276,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
writeq(val64, &vp_reg->rxmac_vcfg1);
}
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4295,7 +4288,6 @@ static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
struct vxge_hw_vp_config *config;
@@ -4545,7 +4537,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
writeq(val64, &vp_reg->tim_wrkld_clc);
- return status;
+ return VXGE_HW_OK;
}
/*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index f8f073880f84..b07d552a27d4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -62,8 +62,8 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, -1);
- info->duplex = -1;
+ ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
+ info->duplex = DUPLEX_UNKNOWN;
}
info->autoneg = AUTONEG_DISABLE;
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
- SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
+ ndev->ethtool_ops = &vxge_ethtool_ops;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcbb8543..7a0deadd53bf 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
fifo->interrupt_count++;
- if (jiffies > fifo->jiffies + HZ / 100) {
+ if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_fifo *hw_fifo = fifo->handle;
fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
{
ring->interrupt_count++;
- if (jiffies > ring->jiffies + HZ / 100) {
+ if (time_before(ring->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_ring *hw_ring = ring->handle;
ring->jiffies = jiffies;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index fddb464aeab3..9afc536c5734 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -406,7 +406,7 @@ union ring_type {
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
-#define NV_RX_SUBSTRACT1 (1<<18)
+#define NV_RX_SUBTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
@@ -423,7 +423,7 @@ union ring_type {
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
-#define NV_RX2_SUBSTRACT1 (1<<25)
+#define NV_RX2_SUBTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
@@ -2832,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
- if (flags & NV_RX_SUBSTRACT1)
+ if (flags & NV_RX_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -2863,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1)
+ if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -2937,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1)
+ if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -4285,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->duplex)
ecmd->duplex = DUPLEX_FULL;
} else {
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ethtool_cmd_speed_set(ecmd, speed);
ecmd->autoneg = np->autoneg;
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->netdev_ops = &nv_netdev_ops_optimized;
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
pci_set_drvdata(pci_dev, dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 422d9b51ac24..8706c0dbd0c3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1361,7 +1361,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_eth_clock_enable(pldat, true);
/* Map IO space */
- pldat->net_base = ioremap(res->start, res->end - res->start + 1);
+ pldat->net_base = ioremap(res->start, resource_size(res));
if (!pldat->net_base) {
dev_err(&pdev->dev, "failed to map registers\n");
ret = -ENOMEM;
@@ -1417,10 +1417,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
pldat->dma_buff_base_p = dma_handle;
- netdev_dbg(ndev, "IO address start :0x%08x\n",
- res->start);
- netdev_dbg(ndev, "IO address size :%d\n",
- res->end - res->start + 1);
+ netdev_dbg(ndev, "IO address space :%pR\n", res);
+ netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
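resource_size() hides the easy-to-fumble inclusive-bounds arithmetic, and %pR prints the whole struct resource in one go. In <linux/ioport.h> of this era the helper is simply:

    static inline resource_size_t resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;
    }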
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index a588ffde9700..44c8be1c6805 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
config PCH_GBE
tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
- depends on PCI && (X86 || COMPILE_TEST)
+ depends on PCI && (X86_32 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 826f0ccdc23c..4fe8ea96bd25 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -91,7 +91,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
if (!netif_carrier_ok(adapter->netdev))
- ethtool_cmd_speed_set(ecmd, -1);
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
return ret;
}
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
void pch_gbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+ netdev->ethtool_ops = &pch_gbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index b6bdeb3c1971..9a997e4c3e08 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
/* The Hamachi-specific entries in the device structure. */
dev->netdev_ops = &hamachi_netdev_ops;
- if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
- else
- SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+ dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
+ &ethtool_ops : &ethtool_ops_no_mii;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 9a6cb482dcd0..69a8dc095072 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
/* The Yellowfin-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd3116e45..d49cba129081 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
Say Y here if you want to enable hardware offload support for
Virtual eXtensible Local Area Network (VXLAN) in the driver.
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ ---help---
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f09c35d669b3..5bf05818a12c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
netxen_nic_change_mtu(netdev, netdev->mtu);
- SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+ netdev->ethtool_ops = &netxen_nic_ethtool_ops;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2eabd44f8914..b5d6bc1a8b00 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
/* Set driver entry points */
ndev->netdev_ops = &ql3xxx_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+ ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01c7d12..be618b9e874f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 57
-#define QLCNIC_LINUX_VERSIONID "5.3.57"
+#define _QLCNIC_LINUX_SUBVERSION 60
+#define QLCNIC_LINUX_VERSIONID "5.3.60"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -441,6 +441,8 @@ struct qlcnic_82xx_dump_template_hdr {
u32 rsvd1[0];
};
+#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
bool enable; /* enable/disable dump */
@@ -537,6 +539,7 @@ struct qlcnic_hardware_context {
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
u16 vxlan_port;
+ struct device *hwmon_dev;
};
struct qlcnic_adapter_stats {
@@ -1018,6 +1021,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_DEL_VXLAN_PORT 0x200000
#endif
+#define QLCNIC_VLAN_FILTERING 0x800000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1316,6 +1321,7 @@ struct qlcnic_eswitch {
#define QL_STATUS_INVALID_PARAM -1
#define MAX_BW 100 /* % of link speed */
+#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
#define DEFAULT_MAC_LEARN 1
@@ -1692,7 +1698,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
u16 *);
@@ -2338,6 +2344,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
}
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+ bool status;
+
+ status = (qlcnic_sriov_pf_check(adapter) ||
+ qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+ return status;
+}
+
static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
{
if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2361,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
else
return QLC_DEFAULT_VNIC_COUNT;
}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb46a75d..a4a4ec0b68f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI function numbers 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
qlcnic_83xx_enable_legacy(adapter);
+ }
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
return 0;
}
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
return -EINVAL;
}
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
}
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
u32 *data, u32 count)
{
int i, j, ret = 0;
u32 temp;
- int err = 0;
/* Check alignment */
if (addr & 0xF)
return -EIO;
mutex_lock(&adapter->ahw->mem_lock);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
for (i = 0; i < count; i++, addr += 16) {
if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
return -EIO;
}
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_ENABLE);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
- if (err == -EIO) {
- mutex_unlock(&adapter->ahw->mem_lock);
- return err;
- }
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if ((temp & TA_CTL_BUSY) == 0)
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c35633..2bf101a47d02 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -418,7 +418,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
-#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
@@ -560,7 +559,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +616,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +657,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
u32 qlcnic_83xx_get_cap_size(void *, int);
void qlcnic_83xx_set_sys_info(void *, int, u32);
void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c721ee97..f33559b72528 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
return ret;
}
/* 16 byte write to MS memory */
- ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
- size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
if (ret) {
vfree(p_cache);
return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
p_cache = (u32 *)fw->data;
addr = (u64)dest;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- p_cache, size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- (u32 *)data, 1);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
max_sds_rings = QLCNIC_MAX_SDS_RINGS;
max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5715b0..304e247bdf33 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
u32 arg1;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
u32 arg1, arg2 = 0;
u8 pci_func;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
+
pci_func = esw_cfg->pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg1 &= ~(0x0ffff << 16);
break;
default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5bacf5210aed..1b7f3dbae289 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -726,6 +726,11 @@ static int qlcnic_set_channels(struct net_device *dev,
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(dev, "No RSS/TSS support in non-MSI-X mode\n");
+ return -EINVAL;
+ }
+
if (channel->other_count || channel->combined_count)
return -EINVAL;
@@ -734,7 +739,7 @@ static int qlcnic_set_channels(struct net_device *dev,
if (err)
return err;
- if (channel->rx_count) {
+ if (adapter->drv_sds_rings != channel->rx_count) {
err = qlcnic_validate_rings(adapter, channel->rx_count,
QLCNIC_RX_QUEUE);
if (err) {
@@ -745,7 +750,7 @@ static int qlcnic_set_channels(struct net_device *dev,
adapter->drv_rss_rings = channel->rx_count;
}
- if (channel->tx_count) {
+ if (adapter->drv_tx_rings != channel->tx_count) {
err = qlcnic_validate_rings(adapter, channel->tx_count,
QLCNIC_TX_QUEUE);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4e70b5..851cb4a80d50 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
return data;
}
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
+ int ret = 0;
+
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
else
- qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
}
static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_vlan_list *cur;
- struct netdev_hw_addr *ha;
- size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter)) {
- if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(ha, netdev) {
- temp = sizeof(struct qlcnic_mac_vlan_list);
- cur = kzalloc(temp, GFP_ATOMIC);
- if (cur == NULL)
- break;
- memcpy(cur->mac_addr,
- ha->addr, ETH_ALEN);
- list_add_tail(&cur->list, &adapter->vf_mc_list);
- }
- }
- qlcnic_sriov_vf_schedule_multi(adapter->netdev);
- return;
- }
- __qlcnic_set_multi(netdev, 0);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
}
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *n;
struct hlist_head *head;
int i;
- unsigned long time;
+ unsigned long expires;
u8 cmd;
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
qlcnic_sre_macaddr_change(adapter,
tmp_fil->faddr,
tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
{
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
spin_lock_bh(&adapter->rx_mac_learn_lock);
adapter->rx_fhash.fnum--;
hlist_del(&tmp_fil->fnode);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d12991f..e45bf09af0c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
- struct net_device *netdev = adapter->netdev;
u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
u16 vlan_id = 0;
u8 hindex, hval;
- if (!qlcnic_sriov_pf_check(adapter)) {
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
- } else {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
vlan_id = ntohs(vh->h_vlan_TCI);
} else if (vlan_tx_tag_present(skb)) {
vlan_id = vlan_tx_tag_get(skb);
}
-
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
- !vlan_id)
- return;
- }
-
- if (adapter->fhash.fnum >= adapter->fhash.fmax) {
- adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
- adapter->fhash.fmax, adapter->fhash.fnum);
- return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
}
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
if (!fil)
return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88a81bf..4fc186713b66 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
if (is_unicast_ether_addr(addr)) {
err = dev_uc_del(netdev, addr);
if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
return -EOPNOTSUPP;
}
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
return idx;
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
- .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
.ndo_get_vf_config = qlcnic_sriov_get_vf_config,
.ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
.ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
adapter->msix_entries[vector].entry = vector;
restore:
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err > 0) {
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
- return -ENOSPC;
+ return err;
netdev_info(adapter->netdev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
}
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_delete_lb_filters(adapter);
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
qlcnic_napi_disable(adapter);
@@ -2052,6 +2057,7 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
goto err_out;
}
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -2069,12 +2087,20 @@ err_out:
static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
kfree(adapter->recv_ctx);
adapter->recv_ctx = NULL;
- if (adapter->ahw->fw_dump.tmpl_hdr) {
- vfree(adapter->ahw->fw_dump.tmpl_hdr);
- adapter->ahw->fw_dump.tmpl_hdr = NULL;
+ if (fw_dump->tmpl_hdr) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ }
+
+ if (fw_dump->dma_buffer) {
+ dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+ fw_dump->dma_buffer, fw_dump->phys_addr);
+ fw_dump->dma_buffer = NULL;
}
kfree(adapter->ahw->reset.buff);
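The PEX-DMA bounce buffer now lives for the adapter's lifetime: it is allocated once when the minidump template is fetched (see the qlcnic_minidump.c hunk below) and released here rather than around every dump. The free must mirror the allocation exactly, same device, same size, and both values the allocation returned:

    buf = dma_alloc_coherent(&pdev->dev, QLC_PEX_DMA_READ_SIZE,
                             &fw_dump->phys_addr, GFP_KERNEL);
    /* ... adapter lifetime ... */
    dma_free_coherent(&pdev->dev, QLC_PEX_DMA_READ_SIZE,
                      buf, fw_dump->phys_addr);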
@@ -2247,10 +2273,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
qlcnic_change_mtu(netdev, netdev->mtu);
- if (qlcnic_sriov_vf_check(adapter))
- SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
- else
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+ netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
@@ -2417,9 +2441,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- if (pdev->is_virtfn)
- return -ENODEV;
-
err = pci_enable_device(pdev);
if (err)
return err;
@@ -2552,9 +2573,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
default:
- dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
- dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
goto err_out_maintenance_mode;
}
}
@@ -2628,7 +2651,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_add_sysfs(adapter);
-
+ qlcnic_register_hwmon_dev(adapter);
return 0;
err_out_disable_mbx_intr:
@@ -2665,7 +2688,7 @@ err_out_disable_pdev:
err_out_maintenance_mode:
set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
ahw->port_type = QLCNIC_XGBE;
if (qlcnic_83xx_check(adapter))
@@ -2698,9 +2721,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
- qlcnic_sriov_pf_disable(adapter);
qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
ahw = adapter->ahw;
unregister_netdev(netdev);
@@ -2735,6 +2758,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_remove_sysfs(adapter);
+ qlcnic_unregister_hwmon_dev(adapter);
+
qlcnic_cleanup_pci_map(adapter->ahw);
qlcnic_release_firmware(adapter);
@@ -2828,6 +2853,8 @@ static int qlcnic_close(struct net_device *netdev)
return 0;
}
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
@@ -2843,7 +2870,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
- if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
filter_size = QLCNIC_LB_MAX_FILTERS;
adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
} else {
@@ -3973,16 +4003,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
strcpy(buf, "Tx");
}
- if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
- netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
- return -EINVAL;
- }
-
- if (adapter->flags & QLCNIC_MSI_ENABLED) {
- netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -4122,7 +4142,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
- dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
+ dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b1266b..e46fc39d425d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
}
inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
hdr->saved_state[index] = value;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
{
struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
}
inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -653,34 +660,31 @@ out:
#define QLC_DMA_CMD_BUFF_ADDR_HI 4
#define QLC_DMA_CMD_STATUS_CTRL 8
-#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
-
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
+ void *tmpl_hdr;
tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
- dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->desc_card_addr);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->start_dma_cmd);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
if (ret)
return ret;
@@ -710,15 +714,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
+ void *tmpl_hdr;
tmpl_hdr = fw_dump->tmpl_hdr;
/* Check if DMA engine is available */
- temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
temp = qlcnic_ind_rd(adapter,
dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +769,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
/* Write DMA descriptor to MS memory*/
temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
- *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
- (u32 *)dma_descr, temp);
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
if (*ret) {
dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
mem->desc_card_addr);
@@ -1141,8 +1146,6 @@ free_mem:
return err;
}
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw;
@@ -1150,6 +1153,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
u32 temp_size = 0;
+ void *temp_buffer;
int err;
ahw = adapter->ahw;
@@ -1199,16 +1203,23 @@ flash_temp:
qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+ if (fw_dump->use_pex_dma) {
+ fw_dump->dma_buffer = NULL;
+ temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ QLC_PEX_DMA_READ_SIZE,
+ &fw_dump->phys_addr,
+ GFP_KERNEL);
+ if (!temp_buffer)
+ fw_dump->use_pex_dma = false;
+ else
+ fw_dump->dma_buffer = temp_buffer;
+ }
+
dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
fw_dump->cap_mask);
- if (qlcnic_83xx_check(adapter) &&
- (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
- fw_dump->use_pex_dma = true;
- else
- fw_dump->use_pex_dma = false;
-
qlcnic_enable_fw_dump_state(adapter);
return 0;
@@ -1224,7 +1235,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct qlcnic_hardware_context *ahw;
struct qlcnic_dump_entry *entry;
- void *temp_buffer, *tmpl_hdr;
+ void *tmpl_hdr;
u32 ocm_window;
__le32 *buffer;
char mesg[64];
@@ -1268,16 +1279,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
- if (fw_dump->use_pex_dma) {
- temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
- &fw_dump->phys_addr,
- GFP_KERNEL);
- if (!temp_buffer)
- fw_dump->use_pex_dma = false;
- else
- fw_dump->dma_buffer = temp_buffer;
- }
-
if (qlcnic_82xx_check(adapter)) {
ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1335,10 +1336,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
- if (fw_dump->use_pex_dma)
- dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
- fw_dump->dma_buffer, fw_dump->phys_addr);
-
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1fd1d27..4677b2edccca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
};
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
#define QLC_BC_CMD 1
struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
- struct mutex vlan_list_lock; /* Lock for VLAN list */
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
struct list_head list;
struct work_struct work;
void *ptr;
+ struct qlcnic_cmd_args *cmd;
};
struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
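The extra int in the qlcnic_sriov_set_vf_tx_rate() prototype follows the core netdev change that replaced the single-rate .ndo_set_vf_tx_rate callback with .ndo_set_vf_rate, which carries both a minimum and a maximum rate in Mbps. A hedged sketch of a callback with the new shape (the checks are illustrative, not qlcnic's exact policy):

/* Rates are in Mbps; 0 conventionally means "no limit". */
static int example_set_vf_rate(struct net_device *dev, int vf,
			       int min_tx_rate, int max_tx_rate)
{
	if (max_tx_rate && min_tx_rate > max_tx_rate)
		return -EINVAL;		/* min must not exceed max */

	/* program the hardware scheduler for this VF here */
	return 0;
}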
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1f5ab9..1659c804f1d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
- mutex_init(&vf->vlan_list_lock);
+ spin_lock_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
goto qlcnic_destroy_async_wq;
}
sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
vp->max_tx_bw = MAX_BW;
- vp->spoofchk = true;
+ vp->min_tx_bw = MIN_BW;
+ vp->spoofchk = false;
random_ether_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
@@ -454,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
struct qlcnic_cmd_args cmd;
int ret = 0;
+ memset(&cmd, 0, sizeof(cmd));
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
return ret;
@@ -515,6 +520,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
{
int err;
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
@@ -770,6 +777,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
cmd->req.arg = (u32 *)trans->req_pay;
cmd->rsp.arg = (u32 *)trans->rsp_pay;
cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
remainder = (trans->rsp_pay_size) % (bc_pay_sz);
num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
if (remainder)
@@ -1356,7 +1364,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1416,17 @@ retry:
(mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
rsp = QLCNIC_RCODE_SUCCESS;
} else {
- rsp = mbx_err_code;
- if (!rsp)
- rsp = 1;
- dev_err(dev,
- "MBX command 0x%x failed with err:0x%x for VF %d\n",
- opcode, mbx_err_code, func);
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
}
err_out:
@@ -1435,12 +1448,23 @@ free_cmd:
return rsp;
}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
int ret;
+ memset(&cmd, 0, sizeof(cmd));
if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
return -ENOMEM;
@@ -1465,58 +1489,28 @@ out:
return ret;
}
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- struct qlcnic_mac_vlan_list *cur;
- struct list_head *head, tmp_list;
struct qlcnic_vf_info *vf;
u16 vlan_id;
int i;
- static const u8 bcast_addr[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
vf = &adapter->ahw->sriov->vf_info[0];
- INIT_LIST_HEAD(&tmp_list);
- head = &adapter->vf_mc_list;
- netif_addr_lock_bh(netdev);
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
- list_move(&cur->list, &tmp_list);
- }
-
- netif_addr_unlock_bh(netdev);
-
- while (!list_empty(&tmp_list)) {
- cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_vlan_list, list);
- if (!qlcnic_sriov_check_any_vlan(vf)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- } else {
- mutex_lock(&vf->vlan_list_lock);
- for (i = 0; i < sriov->num_allowed_vlans; i++) {
- vlan_id = vf->sriov_vlans[i];
- if (vlan_id) {
- qlcnic_nic_add_mac(adapter, bcast_addr,
- vlan_id);
- qlcnic_nic_add_mac(adapter,
- cur->mac_addr,
- vlan_id);
- }
- }
- mutex_unlock(&vf->vlan_list_lock);
- if (qlcnic_84xx_check(adapter)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- }
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id);
}
- list_del(&cur->list);
- kfree(cur);
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0);
}
}
@@ -1525,6 +1519,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
struct list_head *head = &bc->async_list;
struct qlcnic_async_work_list *entry;
+ flush_workqueue(bc->bc_async_wq);
while (!list_empty(head)) {
entry = list_entry(head->next, struct qlcnic_async_work_list,
list);
@@ -1534,10 +1529,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
}
}
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1548,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr);
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev);
+ /* Configure unicast MAC addresses; if there is not sufficient
+ * space to store all of them, enable promiscuous mode
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
+
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+ }
qlcnic_nic_set_promisc(adapter, mode);
}
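With the list-copying helper gone, the set_rx_mode path now walks the stack-maintained MC and UC lists directly; ndo_set_rx_mode is already invoked under netif_addr_lock_bh, so no private snapshot is needed. A small sketch of the iteration idiom:

#include <linux/netdevice.h>

/* Called from ndo_set_rx_mode context, addr lock held by the core. */
static void demo_sync_addrs(struct net_device *ndev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, ndev)
		pr_info("mc %pM\n", ha->addr);	/* program MC filter */

	netdev_for_each_uc_addr(ha, ndev)
		pr_info("uc %pM\n", ha->addr);	/* program UC filter */
}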
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
struct qlcnic_async_work_list *entry;
- struct net_device *netdev;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args *cmd;
entry = container_of(work, struct qlcnic_async_work_list, work);
- netdev = (struct net_device *)entry->ptr;
-
- qlcnic_sriov_vf_set_multi(netdev);
+ adapter = entry->ptr;
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
@@ -1595,8 +1620,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
return entry;
}
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
- work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data,
+ struct qlcnic_cmd_args *cmd)
{
struct qlcnic_async_work_list *entry = NULL;
@@ -1605,21 +1631,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
return;
entry->ptr = data;
+ entry->cmd = cmd;
INIT_WORK(&entry->work, func);
queue_work(bc->bc_async_wq, &entry->work);
}
-void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
if (adapter->need_fw_reset)
- return;
+ return -EIO;
- qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
- netdev);
+ qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+ adapter, cmd);
+ return 0;
}
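Commands flagged QLC_83XX_MBX_CMD_NO_WAIT are now handed to the back-channel workqueue and issued later from process context, so callers in atomic context never block on the mailbox. The skeleton of that deferral, with hypothetical types standing in for the qlcnic ones:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct async_cmd {
	struct work_struct work;
	int opcode;			/* stand-in for the real cmd args */
};

static void async_cmd_fn(struct work_struct *work)
{
	struct async_cmd *ac = container_of(work, struct async_cmd, work);

	/* issue the mailbox command synchronously from the worker */
	kfree(ac);
}

static int queue_async_cmd(struct workqueue_struct *wq, int opcode)
{
	struct async_cmd *ac = kzalloc(sizeof(*ac), GFP_ATOMIC);

	if (!ac)
		return -ENOMEM;

	ac->opcode = opcode;
	INIT_WORK(&ac->work, async_cmd_fn);
	queue_work(wq, &ac->work);
	return 0;
}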
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1843,6 +1871,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
return 0;
}
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
struct qlcnic_adapter *adapter;
@@ -1874,6 +1908,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
}
idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
idc->delay);
@@ -1897,7 +1933,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
if (!vf->sriov_vlans)
return err;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
for (i = 0; i < sriov->num_allowed_vlans; i++) {
if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1942,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1915,12 +1951,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
{
int err = 0;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan >= sriov->num_allowed_vlans)
err = -EINVAL;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1973,7 +2009,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
if (!vf->sriov_vlans)
return;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
switch (opcode) {
case QLC_VLAN_ADD:
@@ -1986,7 +2022,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
netdev_err(adapter->netdev, "Invalid VLAN operation\n");
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return;
}
@@ -1994,10 +2030,12 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
+ memset(&cmd, 0, sizeof(cmd));
if (vid == 0)
return 0;
@@ -2019,14 +2057,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
dev_err(&adapter->pdev->dev,
"Failed to configure guest VLAN, err=%d\n", ret);
} else {
+ netif_addr_lock_bh(netdev);
qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
if (enable)
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_set_multi(adapter->netdev);
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2199,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
bool err = false;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan)
err = true;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
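vlan_list_lock changes from a mutex to a spinlock throughout this file because the VLAN list is now consulted from the set_rx_mode path, which runs in BH context where sleeping locks are illegal; the _bh variants also keep softirqs from re-entering the section. A minimal sketch of the pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(vlan_lock);	/* replaces the old mutex */
static u16 vlans[8];

/* Safe from both process and BH context. */
static bool any_vlan_configured(void)
{
	bool found = false;
	int i;

	spin_lock_bh(&vlan_lock);
	for (i = 0; i < ARRAY_SIZE(vlans); i++) {
		if (vlans[i]) {
			found = true;
			break;
		}
	}
	spin_unlock_bh(&vlan_lock);

	return found;
}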
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 280137991544..a29538b86edf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_ques = res->num_tx_queues / max;
if (qlcnic_83xx_pf_check(adapter))
- num_macs = 1;
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
cmd.req.arg[1] = 0x4;
if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
cmd.req.arg[1] |= BIT_16;
if (qlcnic_84xx_check(adapter))
cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
return -EPERM;
}
+ qlcnic_sriov_pf_disable(adapter);
+
rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
- qlcnic_sriov_pf_disable(adapter);
-
qlcnic_sriov_free_vlans(adapter);
qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
qlcnic_sriov_alloc_vlans(adapter);
- err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
__qlcnic_down(adapter, netdev);
err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
- if (err) {
- netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
- adapter->portnum);
+ if (err)
+ goto error;
- err = -EIO;
- if (qlcnic_83xx_configure_opmode(adapter))
- goto error;
- } else {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
netdev_info(netdev,
"SR-IOV is enabled successfully on port %d\n",
adapter->portnum);
/* Return number of vfs enabled */
- err = num_vfs;
+ return num_vfs;
}
+
+ rtnl_lock();
if (netif_running(netdev))
- __qlcnic_up(adapter, netdev);
+ __qlcnic_down(adapter, netdev);
error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
return err;
}
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
u16 vlan, u8 op)
{
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd;
struct qlcnic_macvlan_mbx mv;
struct qlcnic_vport *vp;
u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
vp = vf->vp;
- if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
return -ENOMEM;
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
- goto out;
+ goto free_args;
}
if (vlan)
op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
- cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
- cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
addr = vp->mac;
mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
+ if (!err)
+ return err;
-out:
- qlcnic_free_mbx_args(&cmd);
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
return err;
}
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
sriov = adapter->ahw->sriov;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan) {
for (i = 0; i < sriov->num_allowed_vlans; i++) {
vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
opcode);
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
if (vf->vp->vlan_mode != QLC_PVID_MODE) {
if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
{
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
ctx_id = cmd->req.arg[1] >> 16;
pkts = cmd->req.arg[2] & 0xffff;
time = cmd->req.arg[2] >> 16;
- if (ctx_id != vf->rx_ctx_id)
- return -EINVAL;
- if (pkts > coal->rx_packets)
- return -EINVAL;
- if (time < coal->rx_time_us)
- return -EINVAL;
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
}
static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
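The reworked validator first pulls the coalescing type out of the low bits of arg[1] and then applies the RX or TX limits separately. A short sketch of the same field decoding, assuming the packing shown above (type in bits 2:0, context id in the top half-word of arg1, packets and microseconds split across arg2):

#define COAL_TYPE_MASK	0x7

struct coal_req {
	u8 type;	/* bits 2:0 of arg1 */
	u16 ctx_id;	/* bits 31:16 of arg1 */
	u16 pkts;	/* bits 15:0 of arg2 */
	u16 time_us;	/* bits 31:16 of arg2 */
};

static void coal_decode(u32 arg1, u32 arg2, struct coal_req *req)
{
	req->type = arg1 & COAL_TYPE_MASK;
	req->ctx_id = arg1 >> 16;
	req->pkts = arg2 & 0xffff;
	req->time_us = arg2 >> 16;
}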
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_macvlan_mbx *macvlan;
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] |= (vf->vp->handle << 16);
cmd->req.arg[1] |= BIT_31;
- macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
- if (!(macvlan->mac_addr0 & BIT_0)) {
- dev_err(&adapter->pdev->dev,
- "MAC address change is not allowed from VF %d",
- vf->pci_func);
- return -EINVAL;
- }
-
if (vp->vlan_mode == QLC_PVID_MODE) {
op = cmd->req.arg[1] & 0x7;
cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return 0;
}
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
if (vf >= sriov->num_vfs)
return -EINVAL;
- if (tx_rate >= 10000 || tx_rate < 100) {
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (!min_tx_rate)
+ min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+ if (max_tx_rate &&
+ (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
netdev_err(netdev,
- "Invalid Tx rate, allowed range is [%d - %d]",
- QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
+ "Invalid max Tx rate, allowed range is [%d - %d]",
+ min_tx_rate, QLC_VF_MAX_TX_RATE);
return -EINVAL;
}
- if (tx_rate == 0)
- tx_rate = 10000;
+ if (!max_tx_rate)
+ max_tx_rate = 10000;
- vf_info = &sriov->vf_info[vf];
- vp = vf_info->vp;
- vpid = vp->handle;
+ if (min_tx_rate &&
+ (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ netdev_err(netdev,
+ "Invalid min Tx rate, allowed range is [%d - %d]",
+ QLC_VF_MIN_TX_RATE, max_tx_rate);
+ return -EINVAL;
+ }
if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
return -EIO;
- nic_info.max_tx_bw = tx_rate / 100;
+ nic_info.max_tx_bw = max_tx_rate / 100;
+ nic_info.min_tx_bw = min_tx_rate / 100;
nic_info.bit_offsets = BIT_0;
if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
return -EIO;
}
- vp->max_tx_bw = tx_rate / 100;
+ vp->max_tx_bw = max_tx_rate / 100;
netdev_info(netdev,
- "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
- tx_rate, vp->max_tx_bw, vf);
+ "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ max_tx_rate, vp->max_tx_bw, vf);
+ vp->min_tx_bw = min_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ min_tx_rate, vp->min_tx_bw, vf);
return 0;
}
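The bandwidth fields are programmed as a percentage of the 10 Gb/s line rate, hence the repeated divisions by 100 above (10000 Mbps / 100 = 100 %). In miniature:

/* Bandwidth is programmed in percent of the 10 Gb/s line rate. */
static inline int mbps_to_pct(int mbps)
{
	return mbps / 100;	/* e.g. 2500 Mbps -> 25 % */
}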
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
ivi->qos = vp->qos;
ivi->spoofchk = vp->spoofchk;
if (vp->max_tx_bw == MAX_BW)
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = 0;
+ else
+ ivi->max_tx_rate = vp->max_tx_bw * 100;
+ if (vp->min_tx_bw == MIN_BW)
+ ivi->min_tx_rate = 0;
else
- ivi->tx_rate = vp->max_tx_bw * 100;
+ ivi->min_tx_rate = vp->min_tx_bw * 100;
ivi->vf = vf;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e27f2e1..f5786d5792df 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#define QLC_STATUS_UNSUPPORTED_CMD -2
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
if (adapter->npars[i].pci_func == pci_func)
return i;
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
return -EINVAL;
}
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
.write = qlcnic_83xx_sysfs_flash_write_handler,
};
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+ /* display millidegree Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
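hwmon_device_register_with_groups() attaches the attribute groups in one call, so the driver needs no separate sysfs_create_group()/remove pair. A stripped-down sketch of the same temp1_input registration flow, under hypothetical names:

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

static ssize_t demo_show_temp(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* report a fixed 42 degrees Celsius in millidegrees */
	return sprintf(buf, "%d\n", 42000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 1);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);

/* parent is the PCI/platform device; returns the hwmon device or ERR_PTR */
static struct device *demo_hwmon_register(struct device *parent)
{
	return hwmon_device_register_with_groups(parent, "demo", NULL,
						 demo_groups);
}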
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76acab81..b40050e03a56 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
}
return status;
err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
ql_free_irq(qdev);
return status;
}
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->irq = pdev->irq;
ndev->netdev_ops = &qlge_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+ ndev->ethtool_ops = &qlge_ethtool_ops;
ndev->watchdog_timeo = 10 * HZ;
err = register_netdev(ndev);
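SET_ETHTOOL_OPS() was only a macro around this assignment and is being removed tree-wide, which is why the same one-line conversion repeats in the drivers below. The whole change in miniature:

static const struct ethtool_ops demo_ethtool_ops;

static void demo_setup(struct net_device *ndev)
{
	/* before: SET_ETHTOOL_OPS(ndev, &demo_ethtool_ops); */
	ndev->ethtool_ops = &demo_ethtool_ops;
}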
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index aa1c079f231d..be425ad5e824 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->ethtool_ops = &rtl8169_ethtool_ops;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a9509ccd33b..7622213beef1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0004,
+ [EDRRR] = 0x0008,
+ [TDLAR] = 0x000c,
+ [RDLAR] = 0x0010,
+ [EESR] = 0x0014,
+ [EESIPR] = 0x0018,
+ [TRSCER] = 0x001c,
+ [RMFCR] = 0x0020,
+ [TFTR] = 0x0024,
+ [FDR] = 0x0028,
+ [RMCR] = 0x002c,
+ [EDOCR] = 0x0030,
+ [FCFTR] = 0x0034,
+ [RPADIR] = 0x0038,
+ [TRIMD] = 0x003c,
+ [RBWAR] = 0x0040,
+ [RDFAR] = 0x0044,
+ [TBRAR] = 0x004c,
+ [TDFAR] = 0x0050,
+
[ECMR] = 0x0160,
[ECSR] = 0x0164,
[ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- .rmcr_value = RMCR_RNC,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000072f,
- .rmcr_value = RMCR_RNC,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = RMCR_RNC,
.apr = 1,
.mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = RMCR_RNC,
.no_psr = 1,
.apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->fdr_value)
cd->fdr_value = DEFAULT_FDR_INIT;
- if (!cd->rmcr_value)
- cd->rmcr_value = DEFAULT_RMCR_VALUE;
-
if (!cd->tx_check)
cd->tx_check = DEFAULT_TX_CHECK;
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
sh_eth_write(ndev, 0, TFTR);
- /* Frame recv control */
- sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
+ /* Frame recv control (enable multiple-packets per rx irq) */
+ sh_eth_write(ndev, RMCR_RNC, RMCR);
sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
@@ -1385,7 +1399,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
int entry = mdp->cur_rx % mdp->num_rx_ring;
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
- int exceeded = 0;
u16 pkt_len = 0;
u32 desc_status;
@@ -1397,10 +1410,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (--boguscnt < 0)
break;
- if (*quota <= 0) {
- exceeded = 1;
+ if (*quota <= 0)
break;
- }
+
(*quota)--;
if (!(desc_status & RDFEND))
@@ -1448,7 +1460,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
- rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
@@ -1494,7 +1505,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
- return exceeded;
+ return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
@@ -2627,8 +2638,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
pdev->name, pdev->id);
/* PHY IRQ */
- mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
- GFP_KERNEL);
+ mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
@@ -2843,7 +2854,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
else
ndev->netdev_ops = &sh_eth_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+ ndev->ethtool_ops = &sh_eth_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
/* debug message level */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37cd5fec..b37c427144ee 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
};
-#define DEFAULT_RMCR_VALUE 0x00000000
/* ECMR */
enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
unsigned long fdr_value;
unsigned long fcftr_value;
unsigned long rpadir_value;
- unsigned long rmcr_value;
/* interrupt checking mask */
unsigned long tx_check;
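The sh_eth_rx() rework reports quota exhaustion straight from *quota rather than through a shadow flag, which is what its NAPI poll caller ultimately needs. The standard budget-accounting shape, as a hedged sketch:

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
	int done = 0;
	int pending = 3;	/* pretend 3 descriptors completed */

	while (pending && done < budget) {
		pending--;
		done++;		/* one packet handed to the stack */
	}

	if (done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts here */
	}

	return done;		/* must never exceed budget */
}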
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0415fa50eeb7..c0981ae45874 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
void sxgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+ netdev->ethtool_ops = &sxgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a983869f..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
* @rx_rsize: ring size
* Description: this function frees the DMA RX descriptor ring
*/
-void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
- int rx_rsize)
+static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+ int rx_rsize)
{
dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
* @tx_rsize: ring size
* Description: this function frees the DMA TX descriptor ring
*/
-void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
- int tx_rsize)
+static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+ int tx_rsize)
{
dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
return 0;
}
-
/* Prepare first Tx descriptor for doing TSO operation */
-void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
- struct sxgbe_tx_norm_desc *first_desc,
- struct sk_buff *skb)
+static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+ struct sxgbe_tx_norm_desc *first_desc,
+ struct sk_buff *skb)
{
unsigned int total_hdr_len, tcp_hdr_len;
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
readl(ioaddr + SXGBE_HASH_LOW));
}
-/**
- * sxgbe_config - entry point for changing configuration mode passed on by
- * ifconfig
- * @dev : pointer to the device structure
- * @map : pointer to the device mapping structure
- * Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever some device configuration is changed.
- * Return value:
- * This function returns 0 if success and appropriate error otherwise.
- */
-static int sxgbe_config(struct net_device *dev, struct ifmap *map)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- /* Can't act on a running interface */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != (unsigned long)priv->ioaddr) {
- netdev_warn(dev, "can't change I/O address\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != priv->irq) {
- netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
.ndo_do_ioctl = sxgbe_ioctl,
- .ndo_set_config = sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 56f8bf5a3f1b..81437d91df99 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -188,7 +188,6 @@
/* L3/L4 function registers */
#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
-#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
#define SXGBE_CORE_L34_DATA_REG 0x0C04
/* ARP registers */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 63d595fd3cc5..1e274045970f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
} else {
net_dev->netdev_ops = &efx_farch_netdev_ops;
}
- SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+ net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 0de8b07c24c2..74739c4b9997 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
return 0;
}
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const u32 *indir)
+static int efx_ethtool_set_rxfh(struct net_device *net_dev,
+ const u32 *indir, const u8 *key)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
- .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
- .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+ .get_rxfh = efx_ethtool_get_rxfh,
+ .set_rxfh = efx_ethtool_set_rxfh,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
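The .get_rxfh_indir/.set_rxfh_indir pair is folded into .get_rxfh/.set_rxfh, which carry the indirection table and the hash key together; a driver with no readable key, like this one, simply leaves *key untouched. Sketch:

#include <linux/ethtool.h>
#include <linux/string.h>

static u32 demo_indir[128];	/* hypothetical RSS indirection table */

static int demo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
	if (indir)
		memcpy(indir, demo_indir, sizeof(demo_indir));
	/* no readable hash key: *key is deliberately not written */
	return 0;
}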
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4d3f119b67b3..afb94aa2c15e 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,10 +66,17 @@
#define EFX_USE_QWORD_IO 1
#endif
+/* A hardware issue requires that only 64-bit naturally aligned writes
+ * reach the hardware. It's not strictly necessary to restrict this to
+ * the x86_64 arch, but it is done for safety, since unusual
+ * write-combining behaviour can break PIO.
+ */
+#ifdef CONFIG_X86_64
/* PIO is a win only if write-combining is possible */
#ifdef ARCH_HAS_IOREMAP_WC
#define EFX_USE_PIO 1
#endif
+#endif
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9a9205e77896..43d2e64546ed 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = 0;
+ ivi->min_tx_rate = 0;
tci = ntohs(vf->addr.tci);
ivi->vlan = tci & VLAN_VID_MASK;
ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa9475300411..ede8dcca0ff3 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
u8 buf[L1_CACHE_BYTES];
};
+/* Copy in explicit 64-bit writes. */
+static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
+{
+ u64 *src64 = src;
+ u64 __iomem *dest64 = dest;
+ size_t l64 = len / 8;
+ size_t i;
+
+ for (i = 0; i < l64; i++)
+ writeq(src64[i], &dest64[i]);
+}
+
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
* Advances piobuf pointer. Leaves additional data in the copy buffer.
*/
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
{
int block_len = len & ~(sizeof(copy_buf->buf) - 1);
- memcpy_toio(*piobuf, data, block_len);
+ efx_memcpy_64(*piobuf, data, block_len);
*piobuf += block_len;
len -= block_len;
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
if (copy_buf->used < sizeof(copy_buf->buf))
return;
- memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
*piobuf += sizeof(copy_buf->buf);
data += copy_to_buf;
len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
{
/* if there's anything in it, write the whole buffer, including junk */
if (copy_buf->used)
- memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
}
/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
*/
BUILD_BUG_ON(L1_CACHE_BYTES >
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- memcpy_toio(tx_queue->piobuf, skb->data,
- ALIGN(skb->len, L1_CACHE_BYTES));
+ efx_memcpy_64(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
}
EFX_POPULATE_QWORD_5(buffer->option,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index acbbe48a519c..a86339903b9b 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
dev->netdev_ops = &sis190_netdev_ops;
- SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+ dev->ethtool_ops = &sis190_ethtool_ops;
dev->watchdog_timeo = SIS190_TX_TIMEOUT;
spin_lock_init(&tp->lock);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index c7a4868571f9..6b33127ab352 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
/* The SMC91c92-specific entries in the device structure. */
dev->netdev_ops = &smc_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
smc->mii_if.dev = dev;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151da40d..5e13fa5524ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
goto out_disable_resources;
}
+ netif_carrier_off(dev);
+
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
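Marking the carrier off before register_netdev() closes the window in which userspace could observe a freshly registered interface as link-up before the PHY has reported anything. The ordering in brief:

/* probe-time ordering: carrier off before the device becomes visible */
static int demo_register(struct net_device *ndev)
{
	netif_carrier_off(ndev);	/* no false "link up" window */

	return register_netdev(ndev);	/* device is live from here on */
}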
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5f9cb85c8ef..c62e67f3c2f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
return -EBUSY;
}
cmd->transceiver = XCVR_INTERNAL;
- spin_lock_irq(&priv->lock);
rc = phy_ethtool_gset(phy, cmd);
- spin_unlock_irq(&priv->lock);
return rc;
}
@@ -431,8 +429,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (priv->pcs) /* FIXME */
return;
- spin_lock(&priv->lock);
-
pause->rx_pause = 0;
pause->tx_pause = 0;
pause->autoneg = priv->phydev->autoneg;
@@ -442,7 +438,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (priv->flow_ctrl & FLOW_TX)
pause->tx_pause = 1;
- spin_unlock(&priv->lock);
}
static int
@@ -457,8 +452,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (priv->pcs) /* FIXME */
return -EOPNOTSUPP;
- spin_lock(&priv->lock);
-
if (pause->rx_pause)
new_pause |= FLOW_RX;
if (pause->tx_pause)
@@ -473,7 +466,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
} else
priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
- spin_unlock(&priv->lock);
return ret;
}
@@ -784,5 +776,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+ netdev->ethtool_ops = &stmmac_ethtool_ops;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f4841d2e8dc..057a1208e594 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1753,7 +1753,7 @@ static int stmmac_open(struct net_device *dev)
}
/* Request the IRQ lines */
- if (priv->lpi_irq != -ENXIO) {
+ if (priv->lpi_irq > 0) {
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
@@ -1813,7 +1813,7 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
- if (priv->lpi_irq != -ENXIO)
+ if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
@@ -2212,27 +2212,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
stmmac_tx_err(priv);
}
-/* Configuration changes (passed on by ifconfig) */
-static int stmmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr) {
- pr_warn("%s: can't change I/O address\n", dev->name);
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != dev->irq) {
- pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
/**
* stmmac_set_rx_mode - entry point for multicast addressing
* @dev : pointer to the device structure
@@ -2598,7 +2577,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
- .ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
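Testing lpi_irq with `> 0` instead of `!= -ENXIO` is the robust idiom: platform_get_irq_byname() can return any negative errno, and 0 is not a valid IRQ either. A sketch for an optional interrupt line, assuming the same resource name:

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static int demo_request_optional_irq(struct platform_device *pdev,
				     irq_handler_t handler, void *data)
{
	int irq = platform_get_irq_byname(pdev, "eth_lpi");

	if (irq <= 0)
		return 0;	/* optional line absent: just skip it */

	return request_irq(irq, handler, IRQF_SHARED, "demo", data);
}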
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb107823..a5b1e1b776fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
if (new_bus == NULL)
return -ENOMEM;
- if (mdio_bus_data->irqs)
+ if (mdio_bus_data->irqs) {
irqlist = mdio_bus_data->irqs;
- else
+ } else {
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++)
+ priv->mii_irq[addr] = PHY_POLL;
irqlist = priv->mii_irq;
+ }
#ifdef CONFIG_OF
if (priv->device->of_node)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 46aef5108bea..ea7a65be1f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -237,10 +237,12 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
/* Get the MAC information */
priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
- if (priv->dev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- return -ENXIO;
+ if (priv->dev->irq < 0) {
+ if (priv->dev->irq != -EPROBE_DEFER) {
+ netdev_err(priv->dev,
+ "MAC IRQ configuration information not found\n");
+ }
+ return priv->dev->irq;
}
/*
@@ -252,10 +254,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* so the driver will continue to use the mac irq (ndev->irq)
*/
priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
+ if (priv->wol_irq < 0) {
+ if (priv->wol_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
priv->wol_irq = priv->dev->irq;
+ }
priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (priv->lpi_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
platform_set_drvdata(pdev, priv->dev);
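The platform probe must single out -EPROBE_DEFER: it means the interrupt controller is not ready yet, so the code returns it silently and lets the driver core retry the probe later, while every other negative value is a real error worth logging. The pattern, sketched with the macirq name used above:

static int demo_get_required_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname(pdev, "macirq");

	if (irq < 0) {
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "macirq not found\n");
		return irq;	/* -EPROBE_DEFER triggers a later retry */
	}

	return irq;
}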
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 2ead87759ab4..38da73a2a886 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
.get_ethtool_stats = bdx_get_ethtool_stats,
};
- SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
+ netdev->ethtool_ops = &bdx_ethtool_ops;
}
/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f369437..7399a52f7c26 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
static struct mii_bus *cpmac_mii;
-static int cpmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr)
- return -EOPNOTSUPP;
-
- /* ignore other fields */
- return 0;
-}
-
static void cpmac_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
.ndo_do_ioctl = cpmac_ioctl,
- .ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 148da9ae8366..aa8bf45e53dc 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -29,6 +29,8 @@
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define GMII_SEL_MODE_MASK 0x3
+
struct cpsw_phy_sel_priv {
struct device *dev;
u32 __iomem *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
break;
};
- mask = 0x3 << (slave * 2) | BIT(slave + 6);
+ mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
mode <<= slave * 2;
if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
writel(reg, priv->gmii_sel);
}
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
+ switch (slave) {
+ case 0:
+ mask = GMII_SEL_MODE_MASK;
+ break;
+ case 1:
+ mask = GMII_SEL_MODE_MASK << 4;
+ mode <<= 4;
+ break;
+ default:
+ dev_err(priv->dev, "invalid slave number...\n");
+ return;
+ }
+
+ if (priv->rmii_clock_external)
+ dev_err(priv->dev, "RMII External clock is not supported\n");
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
static struct platform_driver cpsw_phy_sel_driver;
static int match(struct device *dev, void *data)
{
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
.compatible = "ti,am3352-cpsw-phy-sel",
.data = &cpsw_gmii_sel_am3352,
},
+ {
+ .compatible = "ti,dra7xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_dra7xx,
+ },
+ {
+ .compatible = "ti,am43xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ priv->dev = &pdev->dev;
priv->cpsw_phy_sel = of_id->data;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
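The new compatibles dispatch through the of_device_id .data pointer: dra7xx gets its own handler while am43xx reuses the am3352 one, and of_match_device() hands the right function back at probe time. The idiom in isolation, with hypothetical handlers:

#include <linux/of_device.h>
#include <linux/platform_device.h>

typedef void (*sel_fn)(void);	/* per-SoC handler type */

static void sel_am3352(void) { /* am3352/am43xx programming */ }
static void sel_dra7xx(void) { /* dra7xx programming */ }

static const struct of_device_id demo_ids[] = {
	{ .compatible = "ti,am3352-cpsw-phy-sel", .data = sel_am3352 },
	{ .compatible = "ti,dra7xx-cpsw-phy-sel", .data = sel_dra7xx },
	{ .compatible = "ti,am43xx-cpsw-phy-sel", .data = sel_am3352 },
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *id = of_match_device(demo_ids, &pdev->dev);

	if (!id)
		return -ENODEV;

	((sel_fn)id->data)();	/* run the per-SoC handler */
	return 0;
}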
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7ebc812..ff380dac6629 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -143,13 +143,13 @@ do { \
u32 i; \
for (i = 0; i < priv->num_irqs; i++) \
enable_irq(priv->irqs_table[i]); \
- } while (0);
+ } while (0)
#define cpsw_disable_irq(priv) \
do { \
u32 i; \
for (i = 0; i < priv->num_irqs; i++) \
disable_irq_nosync(priv->irqs_table[i]); \
- } while (0);
+ } while (0)
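Dropping the semicolon after `while (0)` is not cosmetic: with it, the macro expands to two statements, so `if (cond) cpsw_enable_irq(priv); else ...` fails to compile. A minimal demonstration of why the canonical form leaves the semicolon to the caller:

#define demo_twice(x)		\
	do {			\
		(x)++;		\
		(x)++;		\
	} while (0)

static int demo(int cond, int v)
{
	if (cond)
		demo_twice(v);	/* single statement: if/else stays legal */
	else
		v = 0;

	return v;
}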
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
+#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
-#define CTRL_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
- TS_ANNEX_D_EN | TS_LTYPE1_EN)
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
-#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
-#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
+#define CTRL_V3_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
- ctrl &= ~CTRL_ALL_TS_MASK;
+ switch (priv->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
- ctrl |= CTRL_TX_TS_BITS;
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V2_TX_TS_BITS;
- if (priv->cpts->rx_enable)
- ctrl |= CTRL_RX_TS_BITS;
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
cpsw_hwtstamp_v2(priv);
break;
default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return -EINVAL;
if (of_property_read_u32(node, "slaves", &prop)) {
- pr_err("Missing slaves property in the DT.\n");
+ dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
return -EINVAL;
}
data->slaves = prop;
if (of_property_read_u32(node, "active_slave", &prop)) {
- pr_err("Missing active_slave property in the DT.\n");
+ dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
return -EINVAL;
}
data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
- pr_err("Missing cpts_clock_mult property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
return -EINVAL;
}
data->cpts_clock_mult = prop;
if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
- pr_err("Missing cpts_clock_shift property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
return -EINVAL;
}
data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return -ENOMEM;
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
- pr_err("Missing cpdma_channels property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
- pr_err("Missing ale_entries property in the DT.\n");
+ dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
return -EINVAL;
}
data->ale_entries = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
- pr_err("Missing bd_ram_size property in the DT.\n");
+ dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "rx_descs", &prop)) {
- pr_err("Missing rx_descs property in the DT.\n");
+ dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
return -EINVAL;
}
data->rx_descs = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
- pr_err("Missing mac_control property in the DT.\n");
+ dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
return -EINVAL;
}
data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
/* We do not want to force this, as in some cases it may not have child nodes */
if (ret)
- pr_warn("Doesn't have any child node\n");
+ dev_warn(&pdev->dev, "Doesn't have any child node\n");
for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,7 +1893,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
- pr_err("Missing slave[%d] phy_id property\n", i);
+ dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
return -EINVAL;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
@@ -1885,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
- pr_err("Missing or malformed slave[%d] phy-mode property\n",
- i);
+ dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+ i);
return slave_data->phy_if;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
- pr_err("Missing dual_emac_res_vlan in DT.\n");
+ dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
- pr_err("Using %d as Reserved VLAN for %d slave\n",
- slave_data->dual_emac_res_vlan, i);
+ dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
} else {
slave_data->dual_emac_res_vlan = prop;
}
@@ -1920,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
- pr_err("cpsw: error allocating net_device\n");
+ dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
}
@@ -1936,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
ETH_ALEN);
- pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
} else {
random_ether_addr(priv_sl2->mac_addr);
- pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
}
memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
@@ -1970,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
if (ret) {
- pr_err("cpsw: error registering net device\n");
+ dev_err(&pdev->dev, "cpsw: error registering net device\n");
free_netdev(ndev);
ret = -ENODEV;
}
@@ -1999,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
- pr_err("error allocating net_device\n");
+ dev_err(&pdev->dev, "error allocating net_device\n");
return -ENOMEM;
}
@@ -2014,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
if (!priv->cpts) {
- pr_err("error allocating cpts\n");
+ dev_err(&pdev->dev, "error allocating cpts\n");
goto clean_ndev_ret;
}
@@ -2027,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
pinctrl_pm_select_default_state(&pdev->dev);
if (cpsw_probe_dt(&priv->data, pdev)) {
- pr_err("cpsw: platform data missing\n");
+ dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
@@ -2035,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
- pr_info("Detected MACID = %pM\n", priv->mac_addr);
+ dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
} else {
eth_random_addr(priv->mac_addr);
- pr_info("Random MACID = %pM\n", priv->mac_addr);
+ dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
}
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2199,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
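
The pr_err() -> dev_err() conversions above prefix each message with the device name instead of an ad-hoc "cpsw:" tag. A sketch of the resulting idiom for required DT properties; the helper and property name are hypothetical, not from the patch:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>

static int read_required_u32(struct device *dev, struct device_node *node,
			     const char *name, u32 *val)
{
	if (of_property_read_u32(node, name, val)) {
		dev_err(dev, "Missing %s property in the DT.\n", name);
		return -EINVAL;
	}
	return 0;
}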
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 243513980b51..6b56f85951e5 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
}
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
{
- cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+ cpts->refclk = devm_clk_get(dev, "cpts");
if (IS_ERR(cpts->refclk)) {
- pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+ dev_err(dev, "Failed to get cpts refclk\n");
cpts->refclk = NULL;
return;
}
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
static void cpts_clk_release(struct cpts *cpts)
{
clk_disable(cpts->refclk);
- clk_put(cpts->refclk);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- cpts_clk_init(cpts);
+ cpts_clk_init(dev, cpts);
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
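
With devm_clk_get() the clock reference is released automatically when the device goes away, which is why cpts_clk_release() above loses its clk_put(). A minimal sketch of the managed pattern, assuming the "cpts" con_id from the patch and clk_prepare_enable() rather than a bare clk_enable():

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *cpts_get_refclk(struct device *dev)
{
	struct clk *refclk = devm_clk_get(dev, "cpts");

	if (IS_ERR(refclk)) {
		dev_err(dev, "Failed to get cpts refclk\n");
		return NULL;
	}

	clk_prepare_enable(refclk);	/* undo with clk_disable_unprepare() */
	return refclk;
}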
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 88ef27067bf2..4a000f6dd6fc 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
int bitmap_size;
struct cpdma_desc_pool *pool;
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
- return NULL;
+ goto fail;
spin_lock_init(&pool->lock);
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
pool->num_desc = size / pool->desc_size;
bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
- pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
if (!pool->bitmap)
goto fail;
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
if (pool->iomap)
return pool;
-
fail:
- kfree(pool->bitmap);
- kfree(pool);
return NULL;
}
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
spin_lock_irqsave(&pool->lock, flags);
WARN_ON(pool->used_desc);
- kfree(pool->bitmap);
if (pool->cpumap) {
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
iounmap(pool->iomap);
}
spin_unlock_irqrestore(&pool->lock, flags);
- kfree(pool);
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
struct cpdma_ctlr *ctlr;
- ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
if (!ctlr)
return NULL;
@@ -290,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->params.desc_hw_addr,
ctlr->params.desc_mem_size,
ctlr->params.desc_align);
- if (!ctlr->pool) {
- kfree(ctlr);
+ if (!ctlr->pool)
return NULL;
- }
if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -468,7 +461,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
- kfree(ctlr);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +499,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
{
struct cpdma_chan *chan;
- int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
unsigned long flags;
if (__chan_linear(chan_num) >= ctlr->num_chan)
return NULL;
- ret = -ENOMEM;
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
- goto err_chan_alloc;
+ return ERR_PTR(-ENOMEM);
spin_lock_irqsave(&ctlr->lock, flags);
- ret = -EBUSY;
- if (ctlr->channels[chan_num])
- goto err_chan_busy;
+ if (ctlr->channels[chan_num]) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ devm_kfree(ctlr->dev, chan);
+ return ERR_PTR(-EBUSY);
+ }
chan->ctlr = ctlr;
chan->state = CPDMA_STATE_IDLE;
@@ -551,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
ctlr->channels[chan_num] = chan;
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
-
-err_chan_busy:
- spin_unlock_irqrestore(&ctlr->lock, flags);
- kfree(chan);
-err_chan_alloc:
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
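
The same devm_* reasoning drives the cpdma changes: once pool, bitmap, controller, and channel come from devm_kzalloc(), every kfree() in the error and destroy paths can simply be deleted. A compressed sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/slab.h>

struct foo_pool {
	unsigned long *bitmap;
};

static struct foo_pool *foo_pool_create(struct device *dev, size_t nbits)
{
	struct foo_pool *pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	pool->bitmap = devm_kzalloc(dev, BITS_TO_LONGS(nbits) * sizeof(long),
				    GFP_KERNEL);
	if (!pool->bitmap)
		return NULL;	/* pool itself is reclaimed at device teardown */

	return pool;
}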
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8f0e69ce07ca..35a139e9a833 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
res_num))) {
for (irq_num = res->start; irq_num <= res->end; irq_num++) {
- dev_err(emac_dev, "Request IRQ %d\n", irq_num);
if (request_irq(irq_num, emac_irq, 0, ndev->name,
ndev)) {
dev_err(emac_dev,
@@ -1865,7 +1864,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
struct emac_priv *priv;
unsigned long hw_ram_addr;
struct emac_platform_data *pdata;
- struct device *emac_dev;
struct cpdma_params dma_params;
struct clk *emac_clk;
unsigned long emac_bus_frequency;
@@ -1911,7 +1909,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->coal_intvl = 0;
priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
- emac_dev = &ndev->dev;
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
memset(&dma_params, 0, sizeof(dma_params));
- dma_params.dev = emac_dev;
+ dma_params.dev = &pdev->dev;
dma_params.dmaregs = priv->emac_base;
dma_params.rxthresh = priv->emac_base + 0x120;
dma_params.rxfree = priv->emac_base + 0x140;
@@ -1980,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+ ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
/* register the network device */
@@ -1994,7 +1991,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (netif_msg_probe(priv)) {
- dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+ dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9dec5d82..735dc53d4b01 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return -EINVAL;
if (of_property_read_u32(node, "bus_freq", &prop)) {
- pr_err("Missing bus_freq property in the DT.\n");
+ dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
return -EINVAL;
}
data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct phy_device *phy;
int ret, addr;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->bus = mdiobus_alloc();
+ data->bus = devm_mdiobus_alloc(dev);
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
- ret = -ENOMEM;
- goto bail_out;
+ return -ENOMEM;
}
if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- /* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- data->clk = clk_get(&pdev->dev, "fck");
+ data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "could not find register map resource\n");
- ret = -ENOENT;
- goto bail_out;
- }
-
- res = devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev));
- if (!res) {
- dev_err(dev, "could not allocate register map resource\n");
- ret = -ENXIO;
- goto bail_out;
- }
-
- data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!data->regs) {
- dev_err(dev, "could not map mdio registers\n");
- ret = -ENOMEM;
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ ret = PTR_ERR(data->regs);
goto bail_out;
}
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- if (data->bus)
- mdiobus_free(data->bus);
-
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return ret;
}
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus) {
+ if (data->bus)
mdiobus_unregister(data->bus);
- mdiobus_free(data->bus);
- }
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return 0;
}
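
devm_ioremap_resource() folds the lookup-sanity-check, request_mem_region() and ioremap steps removed above into one call. Minimal shape, assuming a platform device with one MEM resource:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *map_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Handles a NULL resource, the region request and the mapping;
	 * failure comes back as an ERR_PTR value, never NULL.
	 */
	return devm_ioremap_resource(&pdev->dev, res);
}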
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 449011b0e007..14389f841d43 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2192,7 +2192,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
{
int ret;
int i;
- int nz_addr = 0;
struct net_device *dev;
struct tile_net_priv *priv;
@@ -2212,7 +2211,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
/* Initialize "priv". */
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
priv->dev = dev;
priv->channel = -1;
priv->loopify_channel = -1;
@@ -2223,15 +2221,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
* be done before the device is opened. If the MAC is all zeroes,
* we use a random address, since we're probably on the simulator.
*/
- for (i = 0; i < 6; i++)
- nz_addr |= mac[i];
-
- if (nz_addr) {
- memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_len = 6;
- } else {
+ if (!is_zero_ether_addr(mac))
+ ether_addr_copy(dev->dev_addr, mac);
+ else
eth_hw_addr_random(dev);
- }
/* Register the network device. */
ret = register_netdev(dev);
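
is_zero_ether_addr() and ether_addr_copy() from <linux/etherdevice.h> replace the open-coded nonzero scan and memcpy(). A sketch of the resulting helper; the mac[] source is assumed to be firmware-provided:

#include <linux/etherdevice.h>

static void assign_mac(struct net_device *dev, const u8 *mac)
{
	if (!is_zero_ether_addr(mac))
		ether_addr_copy(dev->dev_addr, mac);	/* copies ETH_ALEN bytes */
	else
		eth_hw_addr_random(dev);	/* random, locally administered */
}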
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d899d0072ae0..bb7992804664 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1561,7 +1561,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
* alloc netdev
*/
*netdev = alloc_etherdev(sizeof(struct gelic_port));
- if (!netdev) {
+ if (!*netdev) {
kfree(card->unalign);
return NULL;
}
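
The one-character fix above is worth spelling out: the function receives struct net_device **netdev, so !netdev tests the (never-NULL) argument while the allocation result lives in *netdev. A reduced sketch, with priv_size standing in for sizeof(struct gelic_port):

#include <linux/etherdevice.h>

static int alloc_port_netdev(struct net_device **netdev, size_t priv_size)
{
	*netdev = alloc_etherdev(priv_size);
	if (!*netdev)	/* "!netdev" would test the argument, which is never NULL */
		return -ENOMEM;
	return 0;
}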
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2b4474..f66ddaee0c87 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on PCI
+ depends on (PCI || USE_OF)
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b72bb2..2d72f96a9e2c 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
static const char version[] =
"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
-/* This driver was written to use PCI memory space. Some early versions
- of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
rqRhineI = 0x0100, /* See comment below */
+ rqIntPHY = 0x0200, /* Integrated PHY */
+ rqMgmt = 0x0400, /* Management adapter */
+ rqNeedEnMMIO = 0x0800, /* Whether the core needs to be
+ * switched from PIO mode to MMIO
+ * (only applies to PCI)
+ */
};
/*
* rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+ { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
/* Offsets to the device registers. */
enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
BCR1_MED1=0x80, /* for VT6102 */
};
-#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
0
};
-#endif
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
unsigned char *tx_bufs;
dma_addr_t tx_bufs_dma;
- struct pci_dev *pdev;
+ int irq;
long pioaddr;
struct net_device *dev;
struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
"failed" : "succeeded");
}
-#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
int n;
- if (quirks & rqRhineI) {
- /* More recent docs say that this bit is reserved ... */
- n = inb(pioaddr + ConfigA) | 0x20;
- outb(n, pioaddr + ConfigA);
- } else {
- n = inb(pioaddr + ConfigD) | 0x80;
- outb(n, pioaddr + ConfigD);
+
+ if (quirks & rqNeedEnMMIO) {
+ if (quirks & rqRhineI) {
+ /* More recent docs say that this bit is reserved */
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
+ } else {
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
+ }
}
}
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+ long pioaddr,
+ void __iomem *ioaddr,
+ u32 quirks)
+{
+ if (quirks & rqNeedEnMMIO) {
+ int i = 0;
+
+ /* Check that selected MMIO registers match the PIO ones */
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(pioaddr+reg);
+ unsigned char b = readb(ioaddr+reg);
+
+ if (a != b) {
+ dev_err(hwdev,
+ "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
/*
* Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
if (i > 512)
pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
-#ifdef USE_MMIO
/*
* Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
* MMIO. If reloading EEPROM was done first this could be avoided, but
* it is not known if that still works with the "win98-reboot" problem.
*/
enable_mmio(pioaddr, rp->quirks);
-#endif
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
static void rhine_poll(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- const int irq = rp->pdev->irq;
+ const int irq = rp->irq;
disable_irq(irq);
rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
msleep(5);
/* Reload EEPROM controlled bytes cleared by soft reset */
- rhine_reload_eeprom(pioaddr, dev);
+ if (dev_is_pci(dev->dev.parent))
+ rhine_reload_eeprom(pioaddr, dev);
}
static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
#endif
};
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+ long pioaddr, void __iomem *ioaddr, int irq)
{
struct net_device *dev;
struct rhine_private *rp;
- int i, rc;
- u32 quirks;
- long pioaddr;
- long memaddr;
- void __iomem *ioaddr;
- int io_size, phy_id;
+ int i, rc, phy_id;
const char *name;
-#ifdef USE_MMIO
- int bar = 1;
-#else
- int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- pr_info_once("%s\n", version);
-#endif
-
- io_size = 256;
- phy_id = 0;
- quirks = 0;
- name = "Rhine";
- if (pdev->revision < VTunknown0) {
- quirks = rqRhineI;
- io_size = 128;
- }
- else if (pdev->revision >= VT6102) {
- quirks = rqWOL | rqForceReset;
- if (pdev->revision < VT6105) {
- name = "Rhine II";
- quirks |= rqStatusWBRace; /* Rhine-II exclusive */
- }
- else {
- phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
- if (pdev->revision >= VT6105_B0)
- quirks |= rq6patterns;
- if (pdev->revision < VT6105M)
- name = "Rhine III";
- else
- name = "Rhine III (Management Adapter)";
- }
- }
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_out;
/* this should always be supported */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
if (rc) {
- dev_err(&pdev->dev,
- "32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out_pci_disable;
- }
-
- /* sanity check */
- if ((pci_resource_len(pdev, 0) < io_size) ||
- (pci_resource_len(pdev, 1) < io_size)) {
- rc = -EIO;
- dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out_pci_disable;
+ dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+ goto err_out;
}
- pioaddr = pci_resource_start(pdev, 0);
- memaddr = pci_resource_start(pdev, 1);
-
- pci_set_master(pdev);
-
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out_pci_disable;
+ goto err_out;
}
- SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_NETDEV_DEV(dev, hwdev);
rp = netdev_priv(dev);
rp->dev = dev;
rp->quirks = quirks;
rp->pioaddr = pioaddr;
- rp->pdev = pdev;
+ rp->base = ioaddr;
+ rp->irq = irq;
rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out_free_netdev;
-
- ioaddr = pci_iomap(pdev, bar, io_size);
- if (!ioaddr) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
- pci_name(pdev), io_size, memaddr);
- goto err_out_free_res;
- }
-
-#ifdef USE_MMIO
- enable_mmio(pioaddr, quirks);
-
- /* Check that selected MMIO registers match the PIO ones */
- i = 0;
- while (mmio_verify_registers[i]) {
- int reg = mmio_verify_registers[i++];
- unsigned char a = inb(pioaddr+reg);
- unsigned char b = readb(ioaddr+reg);
- if (a != b) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "MMIO do not match PIO [%02x] (%02x != %02x)\n",
- reg, a, b);
- goto err_out_unmap;
- }
- }
-#endif /* USE_MMIO */
-
- rp->base = ioaddr;
+ phy_id = rp->quirks & rqIntPHY ? 1 : 0;
u64_stats_init(&rp->tx_stats.syncp);
u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
- if (pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
- goto err_out_unmap;
+ goto err_out_free_netdev;
+
+ if (rp->quirks & rqRhineI)
+ name = "Rhine";
+ else if (rp->quirks & rqStatusWBRace)
+ name = "Rhine II";
+ else if (rp->quirks & rqMgmt)
+ name = "Rhine III (Management Adapter)";
+ else
+ name = "Rhine III";
netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name,
-#ifdef USE_MMIO
- memaddr,
-#else
- (long)ioaddr,
-#endif
- dev->dev_addr, pdev->irq);
+ name, (long)ioaddr, dev->dev_addr, rp->irq);
- pci_set_drvdata(pdev, dev);
+ dev_set_drvdata(hwdev, dev);
{
u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_free_netdev:
+ free_netdev(dev);
+err_out:
+ return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *hwdev = &pdev->dev;
+ int rc;
+ long pioaddr, memaddr;
+ void __iomem *ioaddr;
+ int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ * in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+ u32 quirks = rqNeedEnMMIO;
+#else
+ u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ pr_info_once("%s\n", version);
+#endif
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ if (pdev->revision < VTunknown0) {
+ quirks |= rqRhineI;
+ } else if (pdev->revision >= VT6102) {
+ quirks |= rqWOL | rqForceReset;
+ if (pdev->revision < VT6105) {
+ quirks |= rqStatusWBRace;
+ } else {
+ quirks |= rqIntPHY;
+ if (pdev->revision >= VT6105_B0)
+ quirks |= rq6patterns;
+ if (pdev->revision >= VT6105M)
+ quirks |= rqMgmt;
+ }
+ }
+
+ /* sanity check */
+ if ((pci_resource_len(pdev, 0) < io_size) ||
+ (pci_resource_len(pdev, 1) < io_size)) {
+ rc = -EIO;
+ dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+ goto err_out_pci_disable;
+ }
+
+ pioaddr = pci_resource_start(pdev, 0);
+ memaddr = pci_resource_start(pdev, 1);
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_pci_disable;
+
+ ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+ if (!ioaddr) {
+ rc = -EIO;
+ dev_err(hwdev,
+ "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ dev_name(hwdev), io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+ enable_mmio(pioaddr, quirks);
+
+ rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+ if (rc)
+ goto err_out_unmap;
+
+ rc = rhine_init_one_common(&pdev->dev, quirks,
+ pioaddr, ioaddr, pdev->irq);
+ if (!rc)
+ return 0;
+
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
-err_out_free_netdev:
- free_netdev(dev);
err_out_pci_disable:
pci_disable_device(pdev);
err_out:
return rc;
}
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const u32 *quirks;
+ int irq;
+ struct resource *res;
+ void __iomem *ioaddr;
+
+ match = of_match_device(rhine_of_tbl, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ return -EINVAL;
+
+ quirks = match->data;
+ if (!quirks)
+ return -EINVAL;
+
+ return rhine_init_one_common(&pdev->dev, *quirks,
+ (long)ioaddr, ioaddr, irq);
+}
+
static int alloc_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void *ring;
dma_addr_t ring_dma;
- ring = pci_alloc_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- &ring_dma);
+ ring = dma_alloc_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma,
+ GFP_ATOMIC);
if (!ring) {
netdev_err(dev, "Could not allocate DMA memory\n");
return -ENOMEM;
}
if (rp->quirks & rqRhineI) {
- rp->tx_bufs = pci_alloc_consistent(rp->pdev,
- PKT_BUF_SZ * TX_RING_SIZE,
- &rp->tx_bufs_dma);
+ rp->tx_bufs = dma_alloc_coherent(hwdev,
+ PKT_BUF_SZ * TX_RING_SIZE,
+ &rp->tx_bufs_dma,
+ GFP_ATOMIC);
if (rp->tx_bufs == NULL) {
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- ring, ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
return -ENOMEM;
}
}
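
Moving from pci_map_single()/PCI_DMA_* to the generic DMA API lets the PCI and platform paths share code: everything keys off the parent struct device. A sketch of the rx-mapping idiom used throughout this conversion, keeping the driver's own 0-as-invalid convention for stored handles:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static dma_addr_t rx_map(struct device *hwdev, struct sk_buff *skb, size_t len)
{
	dma_addr_t handle = dma_map_single(hwdev, skb->data, len,
					   DMA_FROM_DEVICE);

	/* 0 doubles as the "no mapping" marker, matching how
	 * rx_skbuff_dma[] is cleared on error above.
	 */
	return dma_mapping_error(hwdev, handle) ? 0 : handle;
}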
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- rp->rx_ring, rp->rx_ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ rp->rx_ring, rp->rx_ring_dma);
rp->tx_ring = NULL;
if (rp->tx_bufs)
- pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
- rp->tx_bufs, rp->tx_bufs_dma);
+ dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+ rp->tx_bufs, rp->tx_bufs_dma);
rp->tx_bufs = NULL;
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
static void alloc_rbufs(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
dma_addr_t next;
int i;
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
break;
rp->rx_skbuff_dma[i] =
- pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+ dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
rp->rx_skbuff_dma[i] = 0;
dev_kfree_skb(skb);
break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
static void free_rbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
/* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
rp->rx_ring[i].rx_status = 0;
rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->rx_skbuff[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[i],
- rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ rp->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
static void free_tbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->tx_skbuff[i]) {
if (rp->tx_skbuff_dma[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[i],
rp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
@@ -1278,8 +1349,9 @@ static void rhine_set_carrier(struct mii_if_info *mii)
/* autoneg is off: Link is always assumed to be up */
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
- } else /* Let MMI library update carrier status */
- rhine_check_media(dev, 0);
+ }
+
+ rhine_check_media(dev, 0);
netif_info(rp, link, dev, "force_media %d, carrier %d\n",
mii->force_media, netif_carrier_ok(dev));
@@ -1469,7 +1541,7 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
- if (rp->pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
napi_enable(&rp->napi);
@@ -1581,16 +1653,15 @@ static int rhine_open(struct net_device *dev)
void __iomem *ioaddr = rp->base;
int rc;
- rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
- dev);
+ rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+ netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
if (rc) {
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
return rc;
}
alloc_rbufs(dev);
@@ -1659,6 +1730,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void __iomem *ioaddr = rp->base;
unsigned entry;
@@ -1695,9 +1767,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_bufs));
} else {
rp->tx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+ dma_map_single(hwdev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
@@ -1788,6 +1860,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
/* find and cleanup dirty tx descriptors */
@@ -1831,10 +1904,10 @@ static void rhine_tx(struct net_device *dev)
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[entry],
rp->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1936,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int count;
int entry = rp->cur_rx % RX_RING_SIZE;
@@ -1924,19 +1998,19 @@ static int rhine_rx(struct net_device *dev, int limit)
if (pkt_len < rx_copybreak)
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
if (skb) {
- pci_dma_sync_single_for_cpu(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
skb = rp->rx_skbuff[entry];
if (skb == NULL) {
@@ -1945,10 +2019,10 @@ static int rhine_rx(struct net_device *dev, int limit)
}
rp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (unlikely(desc_length & DescTag))
@@ -1979,10 +2053,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (skb == NULL)
break; /* Better luck next round. */
rp->rx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data,
+ dma_map_single(hwdev, skb->data,
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev,
+ rp->rx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
rp->rx_skbuff_dma[entry] = 0;
break;
@@ -2103,7 +2178,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if (rp->pdev->revision >= VT6105M) {
+ } else if (rp->quirks & rqMgmt) {
int i = 0;
u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2200,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
}
/* enable/disable VLAN receive filtering */
- if (rp->pdev->revision >= VT6105M) {
+ if (rp->quirks & rqMgmt) {
if (dev->flags & IFF_PROMISC)
BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
else
@@ -2136,11 +2211,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2352,7 @@ static int rhine_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
iowrite16(CmdStop, ioaddr + ChipCmd);
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
@@ -2286,7 +2361,7 @@ static int rhine_close(struct net_device *dev)
}
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2375,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ iounmap(rp->base);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2443,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
@@ -2367,23 +2455,21 @@ static int rhine_suspend(struct device *device)
netif_device_detach(dev);
- rhine_shutdown(pdev);
+ if (dev_is_pci(device))
+ rhine_shutdown_pci(to_pci_dev(device));
return 0;
}
static int rhine_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
return 0;
-#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
-#endif
rhine_power_init(dev);
free_tbufs(dev);
free_rbufs(dev);
@@ -2408,15 +2494,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#endif /* !CONFIG_PM_SLEEP */
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
- .probe = rhine_init_one,
- .remove = rhine_remove_one,
- .shutdown = rhine_shutdown,
+ .probe = rhine_init_one_pci,
+ .remove = rhine_remove_one_pci,
+ .shutdown = rhine_shutdown_pci,
.driver.pm = RHINE_PM_OPS,
};
+static struct platform_driver rhine_driver_platform = {
+ .probe = rhine_init_one_platform,
+ .remove = rhine_remove_one_platform,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rhine_of_tbl,
+ .pm = RHINE_PM_OPS,
+ }
+};
+
static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
@@ -2437,6 +2534,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
static int __init rhine_init(void)
{
+ int ret_pci, ret_platform;
+
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
pr_info("%s\n", version);
@@ -2449,13 +2548,19 @@ static int __init rhine_init(void)
else if (avoid_D3)
pr_info("avoid_D3 set\n");
- return pci_register_driver(&rhine_driver);
+ ret_pci = pci_register_driver(&rhine_driver_pci);
+ ret_platform = platform_driver_register(&rhine_driver_platform);
+ if ((ret_pci < 0) && (ret_platform < 0))
+ return ret_pci;
+
+ return 0;
}
static void __exit rhine_cleanup(void)
{
- pci_unregister_driver(&rhine_driver);
+ platform_driver_unregister(&rhine_driver_platform);
+ pci_unregister_driver(&rhine_driver_pci);
}
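
On the OF side, per-compatible quirks travel in of_device_id.data, as rhine_of_tbl sets up for "via,vt8500-rhine". A sketch of the lookup, reduced from rhine_init_one_platform() above:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int rhine_probe_quirks(struct platform_device *pdev, u32 *quirks)
{
	const struct of_device_id *match =
		of_match_device(rhine_of_tbl, &pdev->dev);

	if (!match || !match->data)
		return -EINVAL;

	*quirks = *(const u32 *)match->data;	/* vt8500: rqWOL | rqForceReset | rq6patterns */
	return 0;
}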
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fa193c4688da..4ef818a7a6c6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
long end = jiffies + 2;
while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 64b4639f43b6..d4abf478e2bb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
long end = jiffies + 2;
while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
XAE_MDIO_MCR_READY_MASK)) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0d87c67a5ff7..8c4aed3053eb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
*/
while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
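
All three Xilinx fixes are the same bug: open-coded "end - jiffies <= 0" arithmetic misorders across a jiffies wrap, whereas the time_before_eq()/time_after() macros compare correctly. A wrap-safe polling sketch; the 20 ms budget and callback shape are assumptions:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/processor.h>	/* cpu_relax() */

static int wait_ready(int (*ready)(void))
{
	unsigned long end = jiffies + msecs_to_jiffies(20);	/* assumed budget */

	while (!ready()) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;	/* wrap-safe comparison */
		cpu_relax();
	}
	return 0;
}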
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d18f711d0b0c..6cc37c15e0bf 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -28,50 +28,119 @@
#include <linux/hyperv.h>
#include <linux/rndis.h>
-/* Fwd declaration */
-struct hv_netvsc_packet;
-struct ndis_tcp_ip_checksum_info;
+/* RSS related */
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204 /* query and set */
-/* Represent the xfer page packet which contains 1 or more netvsc packet */
-struct xferpage_packet {
- struct list_head list_ent;
- u32 status;
+#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
+#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
- /* # of netvsc packets this xfer packet contains */
- u32 count;
+#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
+#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
+
+struct ndis_obj_header {
+ u8 type;
+ u8 rev;
+ u16 size;
+} __packed;
+
+/* ndis_recv_scale_cap/cap_flag */
+#define NDIS_RSS_CAPS_MESSAGE_SIGNALED_INTERRUPTS 0x01000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_ISR 0x02000000
+#define NDIS_RSS_CAPS_CLASSIFICATION_AT_DPC 0x04000000
+#define NDIS_RSS_CAPS_USING_MSI_X 0x08000000
+#define NDIS_RSS_CAPS_RSS_AVAILABLE_ON_PORTS 0x10000000
+#define NDIS_RSS_CAPS_SUPPORTS_MSI_X 0x20000000
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV4 0x00000100
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6 0x00000200
+#define NDIS_RSS_CAPS_HASH_TYPE_TCP_IPV6_EX 0x00000400
+
+struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */
+ struct ndis_obj_header hdr;
+ u32 cap_flag;
+ u32 num_int_msg;
+ u32 num_recv_que;
+ u16 num_indirect_tabent;
+} __packed;
+
+
+/* ndis_recv_scale_param flags */
+#define NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED 0x0001
+#define NDIS_RSS_PARAM_FLAG_HASH_INFO_UNCHANGED 0x0002
+#define NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED 0x0004
+#define NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED 0x0008
+#define NDIS_RSS_PARAM_FLAG_DISABLE_RSS 0x0010
+
+/* Hash info bits */
+#define NDIS_HASH_FUNC_TOEPLITZ 0x00000001
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_RSS_INDIRECTION_TABLE_MAX_SIZE_REVISION_2 (128 * 4)
+#define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40
+
+#define ITAB_NUM 128
+#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2
+extern u8 netvsc_hash_key[];
+
+struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
+ struct ndis_obj_header hdr;
+
+ /* Qualifies the rest of the information */
+ u16 flag;
+
+ /* The base CPU number for receive processing; not used */
+ u16 base_cpu_number;
+
+ /* This describes the hash function and type being enabled */
+ u32 hashinfo;
+
+ /* The size of indirection table array */
+ u16 indirect_tabsize;
+
+ /* The offset of the indirection table from the beginning of this
+ * structure
+ */
+ u32 indirect_taboffset;
+
+ /* The size of the hash secret key */
+ u16 hashkey_size;
+
+ /* The offset of the secret key from the beginning of this structure */
+ u32 kashkey_offset;
+
+ u32 processor_masks_offset;
+ u32 num_processor_masks;
+ u32 processor_masks_entry_size;
};
+/* Fwd declaration */
+struct ndis_tcp_ip_checksum_info;
+
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
*/
struct hv_netvsc_packet {
/* Bookkeeping stuff */
- struct list_head list_ent;
u32 status;
struct hv_device *device;
bool is_data_pkt;
u16 vlan_tci;
- /*
- * Valid only for receives when we break a xfer page packet
- * into multiple netvsc packets
- */
- struct xferpage_packet *xfer_page_pkt;
+ u16 q_idx;
+ struct vmbus_channel *channel;
- union {
- struct {
- u64 recv_completion_tid;
- void *recv_completion_ctx;
- void (*recv_completion)(void *context);
- } recv;
- struct {
- u64 send_completion_tid;
- void *send_completion_ctx;
- void (*send_completion)(void *context);
- } send;
- } completion;
+ u64 send_completion_tid;
+ void *send_completion_ctx;
+ void (*send_completion)(void *context);
+
+ u32 send_buf_index;
/* This points to the memory after page_buf */
struct rndis_message *rndis_msg;
@@ -120,6 +189,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info);
+void netvsc_channel_cb(void *context);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
int rndis_filter_device_add(struct hv_device *dev,
@@ -514,14 +584,16 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024) /* 1MB */
+#define NETVSC_INVALID_INDEX -1
-#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
-/* Preallocated receive packets */
-#define NETVSC_RECEIVE_PACKETLIST_COUNT 256
+#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_PACKET_SIZE 2048
+#define VRSS_SEND_TAB_SIZE 16
+
/* Per netvsc channel-specific */
struct netvsc_device {
struct hv_device *dev;
@@ -532,12 +604,6 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool start_remove;
bool destroy;
- /*
- * List of free preallocated hv_netvsc_packet to represent receive
- * packet
- */
- struct list_head recv_pkt_list;
- spinlock_t recv_pkt_list_lock;
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
@@ -546,6 +612,15 @@ struct netvsc_device {
u32 recv_section_cnt;
struct nvsp_1_receive_buffer_section *recv_section;
+ /* Send buffer allocated by us */
+ void *send_buf;
+ u32 send_buf_size;
+ u32 send_buf_gpadl_handle;
+ u32 send_section_cnt;
+ u32 send_section_size;
+ unsigned long *send_section_map;
+ int map_words;
+
/* Used for NetVSP initialization protocol */
struct completion channel_init_wait;
struct nvsp_message channel_init_pkt;
@@ -555,10 +630,20 @@ struct netvsc_device {
struct net_device *ndev;
+ struct vmbus_channel *chn_table[NR_CPUS];
+ u32 send_table[VRSS_SEND_TAB_SIZE];
+ u32 num_chn;
+ atomic_t queue_sends[NR_CPUS];
+
/* Holds rndis device info */
void *extension;
- /* The recive buffer for this device */
+
+ int ring_size;
+
+ /* The primary channel callback buffer */
unsigned char cb_buffer[NETVSC_PACKET_SIZE];
+ /* The sub channel callback buffer */
+ unsigned char *sub_cb_buf;
};
/* NdisInitialize message */
@@ -706,6 +791,7 @@ enum ndis_per_pkt_info_type {
IEEE_8021Q_INFO,
ORIGINAL_PKTINFO,
PACKET_CANCEL_ID,
+ NBL_HASH_VALUE = PACKET_CANCEL_ID,
ORIGINAL_NET_BUFLIST,
CACHED_NET_BUFLIST,
SHORT_PKT_PADINFO,
@@ -852,6 +938,9 @@ struct ndis_tcp_lso_info {
#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
sizeof(struct ndis_tcp_lso_info))
+#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+ sizeof(u32))
+
/* Format of Information buffer passed in a SetRequest for the OID */
/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
struct rndis_config_parameter_info {
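
An RSS parameters message is the fixed struct followed by the indirection table and the hash key, located through the offset/size fields. An illustrative builder, not taken from the patch (buffer sizing and hash selection are assumptions; note the header's own spelling of kashkey_offset):

#include <linux/slab.h>
#include <linux/string.h>
#include "hyperv_net.h"		/* structs and defines added above */

static struct ndis_recv_scale_param *build_rss_param(const u8 *key,
						     const u32 *itab)
{
	size_t extra = ITAB_NUM * sizeof(u32) + HASH_KEYLEN;
	struct ndis_recv_scale_param *p;

	p = kzalloc(sizeof(*p) + extra, GFP_KERNEL);
	if (!p)
		return NULL;

	p->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
	p->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
	p->hdr.size = sizeof(*p);
	p->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_TCP_IPV4;

	p->indirect_tabsize = ITAB_NUM * sizeof(u32);
	p->indirect_taboffset = sizeof(*p);
	p->hashkey_size = HASH_KEYLEN;
	p->kashkey_offset = sizeof(*p) + p->indirect_tabsize;

	memcpy((u8 *)p + p->indirect_taboffset, itab, p->indirect_tabsize);
	memcpy((u8 *)p + p->kashkey_offset, key, HASH_KEYLEN);
	return p;
}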
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f7629ecefa84..c041f63a6d30 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
+#include <asm/sync_bitops.h>
#include "hyperv_net.h"
@@ -80,7 +81,7 @@ get_in_err:
}
-static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
net_device->recv_section = NULL;
}
+ /* Deal with the send buffer we may have setup.
+ * If we got a send section size, it means we received a
+ * SendSendBufferComplete msg (i.e. we sent a
+ * NvspMessage1TypeSendSendBuffer msg), therefore we need
+ * to send a revoke msg here
+ */
+ if (net_device->send_section_size) {
+ /* Send the revoke send buffer */
+ revoke_packet = &net_device->revoke_packet;
+ memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+ revoke_packet->hdr.msg_type =
+ NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+ revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;
+
+ ret = vmbus_sendpacket(net_device->dev->channel,
+ revoke_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)revoke_packet,
+ VM_PKT_DATA_INBAND, 0);
+ /* If we failed here, we might as well return and
+ * have a leak rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev, "unable to send "
+ "revoke send buffer to netvsp\n");
+ return ret;
+ }
+ }
+ /* Teardown the gpadl on the vsp end */
+ if (net_device->send_buf_gpadl_handle) {
+ ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ net_device->send_buf_gpadl_handle);
+
+ /* If we failed here, we might as well return and have a leak
+ * rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to teardown send buffer's gpadl\n");
+ return ret;
+ }
+ net_device->send_buf_gpadl_handle = 0;
+ }
+ if (net_device->send_buf) {
+ /* Free up the send buffer */
+ free_pages((unsigned long)net_device->send_buf,
+ get_order(net_device->send_buf_size));
+ net_device->send_buf = NULL;
+ }
+ kfree(net_device->send_section_map);
+
return ret;
}
-static int netvsc_init_recv_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
goto cleanup;
}
+ /* Now setup the send buffer.
+ */
+ net_device->send_buf =
+ (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(net_device->send_buf_size));
+ if (!net_device->send_buf) {
+ netdev_err(ndev, "unable to allocate send "
+ "buffer of size %d\n", net_device->send_buf_size);
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* Establish the gpadl handle for this buffer on this
+ * channel. Note: This call uses the vmbus connection rather
+ * than the channel to establish the gpadl handle.
+ */
+ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+ net_device->send_buf_size,
+ &net_device->send_buf_gpadl_handle);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to establish send buffer's gpadl\n");
+ goto cleanup;
+ }
+
+ /* Notify the NetVsp of the gpadl handle */
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+ init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
+ net_device->send_buf_gpadl_handle;
+ init_packet->msg.v1_msg.send_send_buf.id = 0;
+
+ /* Send the gpadl notification request */
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to send send buffer's gpadl to netvsp\n");
+ goto cleanup;
+ }
+
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+ BUG_ON(t == 0);
+
+ /* Check the response */
+ if (init_packet->msg.v1_msg.
+ send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
+ netdev_err(ndev, "Unable to complete send buffer "
+ "initialization with NetVsp - status %d\n",
+ init_packet->msg.v1_msg.
+ send_send_buf_complete.status);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Parse the response */
+ net_device->send_section_size = init_packet->msg.
+ v1_msg.send_send_buf_complete.section_size;
+
+ /* Section count is simply the size divided by the section size.
+ */
+ net_device->send_section_cnt =
+ net_device->send_buf_size/net_device->send_section_size;
+
+ dev_info(&device->device, "Send section size: %d, Section count:%d\n",
+ net_device->send_section_size, net_device->send_section_cnt);
+
+ /* Setup state for managing the send buffer. */
+ net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
+ BITS_PER_LONG);
+
+ net_device->send_section_map =
+ kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
+ if (net_device->send_section_map == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
goto exit;
cleanup:
- netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_buf(net_device);
exit:
return ret;
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
else
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+ net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
- ret = netvsc_init_recv_buf(device);
+ ret = netvsc_init_buf(device);
cleanup:
return ret;
@@ -378,7 +512,7 @@ cleanup:
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
- netvsc_destroy_recv_buf(net_device);
+ netvsc_destroy_buf(net_device);
}
/*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
int netvsc_device_remove(struct hv_device *device)
{
struct netvsc_device *net_device;
- struct hv_netvsc_packet *netvsc_packet, *pos;
unsigned long flags;
net_device = hv_get_drvdata(device);
@@ -416,11 +549,8 @@ int netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- list_for_each_entry_safe(netvsc_packet, pos,
- &net_device->recv_pkt_list, list_ent) {
- list_del(&netvsc_packet->list_ent);
- kfree(netvsc_packet);
- }
+ if (net_device->sub_cb_buf)
+ vfree(net_device->sub_cb_buf);
kfree(net_device);
return 0;
@@ -444,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
+static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
+ u32 index)
+{
+ sync_change_bit(index, net_device->send_section_map);
+}
+
static void netvsc_send_completion(struct netvsc_device *net_device,
struct hv_device *device,
struct vmpacket_descriptor *packet)
@@ -451,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
+ u32 send_index;
ndev = net_device->ndev;
@@ -461,7 +598,9 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
(nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
(nvsp_packet->hdr.msg_type ==
- NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
+ NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
+ (nvsp_packet->hdr.msg_type ==
+ NVSP_MSG5_TYPE_SUBCHANNEL)) {
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
@@ -469,28 +608,39 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
} else if (nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
int num_outstanding_sends;
+ u16 q_idx = 0;
+ struct vmbus_channel *channel = device->channel;
+ int queue_sends;
/* Get the send context */
nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
packet->trans_id;
/* Notify the layer above us */
- if (nvsc_packet)
- nvsc_packet->completion.send.send_completion(
- nvsc_packet->completion.send.
- send_completion_ctx);
+ if (nvsc_packet) {
+ send_index = nvsc_packet->send_buf_index;
+ if (send_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, send_index);
+ q_idx = nvsc_packet->q_idx;
+ channel = nvsc_packet->channel;
+ nvsc_packet->send_completion(nvsc_packet->
+ send_completion_ctx);
+ }
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
+ queue_sends = atomic_dec_return(&net_device->
+ queue_sends[q_idx]);
if (net_device->destroy && num_outstanding_sends == 0)
wake_up(&net_device->wait_drain);
- if (netif_queue_stopped(ndev) && !net_device->start_remove &&
- (hv_ringbuf_avail_percent(&device->channel->outbound)
- > RING_AVAIL_PERCENT_HIWATER ||
- num_outstanding_sends < 1))
- netif_wake_queue(ndev);
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
+ !net_device->start_remove &&
+ (hv_ringbuf_avail_percent(&channel->outbound) >
+ RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, q_idx));
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -498,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
}
+static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
+{
+ unsigned long index;
+ u32 max_words = net_device->map_words;
+ unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
+ u32 section_cnt = net_device->send_section_cnt;
+ int ret_val = NETVSC_INVALID_INDEX;
+ int i;
+ int prev_val;
+
+ for (i = 0; i < max_words; i++) {
+ if (!~(map_addr[i]))
+ continue;
+ index = ffz(map_addr[i]);
+ prev_val = sync_test_and_set_bit(index, &map_addr[i]);
+ if (prev_val)
+ continue;
+ if ((index + (i * BITS_PER_LONG)) >= section_cnt)
+ break;
+ ret_val = (index + (i * BITS_PER_LONG));
+ break;
+ }
+ return ret_val;
+}
+
+u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ unsigned int section_index,
+ struct hv_netvsc_packet *packet)
+{
+ char *start = net_device->send_buf;
+ char *dest = (start + (section_index * net_device->send_section_size));
+ int i;
+ u32 msg_size = 0;
+
+ for (i = 0; i < packet->page_buf_cnt; i++) {
+ char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
+ u32 offset = packet->page_buf[i].offset;
+ u32 len = packet->page_buf[i].len;
+
+ memcpy(dest, (src + offset), len);
+ msg_size += len;
+ dest += len;
+ }
+ return msg_size;
+}
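
The two helpers above are the heart of the new copy-into-send-buffer fast path: a shared bitmap records which fixed-size sections of the pre-registered send buffer are in flight, a section is claimed with an atomic test-and-set, and the completion handler releases it via netvsc_free_send_slot(). Below is a minimal user-space sketch of the same allocation scheme, with C11 atomics and GCC builtins standing in for the kernel's ffz()/sync_test_and_set_bit(); SECTION_CNT and the other names are illustrative, not from the driver.

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define SECTION_CNT   64
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)
    #define MAP_WORDS     ((SECTION_CNT + BITS_PER_WORD - 1) / BITS_PER_WORD)
    #define INVALID_INDEX (-1L)

    static _Atomic unsigned long section_map[MAP_WORDS];

    /* Claim the first free section, or INVALID_INDEX if all are busy. */
    static long get_next_send_section(void)
    {
        for (size_t i = 0; i < MAP_WORDS; i++) {
            unsigned long word = atomic_load(&section_map[i]);

            while (~word) {                 /* at least one zero bit */
                unsigned long bit = __builtin_ctzl(~word);
                long index = i * BITS_PER_WORD + bit;

                if (index >= SECTION_CNT)
                    break;
                /* atomic test-and-set, like sync_test_and_set_bit() */
                if (atomic_fetch_or(&section_map[i], 1UL << bit) &
                    (1UL << bit)) {
                    word = atomic_load(&section_map[i]);
                    continue;               /* lost the race, retry */
                }
                return index;
            }
        }
        return INVALID_INDEX;
    }

    /* Release a section on send-completion, like netvsc_free_send_slot(). */
    static void free_send_section(long index)
    {
        atomic_fetch_and(&section_map[index / BITS_PER_WORD],
                         ~(1UL << (index % BITS_PER_WORD)));
    }

    int main(void)
    {
        long a = get_next_send_section();
        long b = get_next_send_section();

        printf("claimed sections %ld and %ld\n", a, b);      /* 0 and 1 */
        free_send_section(a);
        printf("reclaimed %ld\n", get_next_send_section());  /* 0 again */
        return 0;
    }

As in the driver, a lost race simply retries within the word, so two CPUs can never hand out the same section.
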
+
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet)
{
@@ -505,7 +701,12 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
+ struct vmbus_channel *out_channel = NULL;
u64 req_id;
+ unsigned int section_index = NETVSC_INVALID_INDEX;
+ u32 msg_size = 0;
+ struct sk_buff *skb;
+
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -521,25 +722,46 @@ int netvsc_send(struct hv_device *device,
sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
}
- /* Not using send buffer section */
+ /* Attempt to send via sendbuf */
+ if (packet->total_data_buflen < net_device->send_section_size) {
+ section_index = netvsc_get_next_send_section(net_device);
+ if (section_index != NETVSC_INVALID_INDEX) {
+ msg_size = netvsc_copy_to_send_buf(net_device,
+ section_index,
+ packet);
+ skb = (struct sk_buff *)
+ (unsigned long)packet->send_completion_tid;
+ if (skb)
+ dev_kfree_skb_any(skb);
+ packet->page_buf_cnt = 0;
+ }
+ }
+ packet->send_buf_index = section_index;
+
+
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
- 0xFFFFFFFF;
- sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+ section_index;
+ sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
- if (packet->completion.send.send_completion)
+ if (packet->send_completion)
req_id = (ulong)packet;
else
req_id = 0;
+ out_channel = net_device->chn_table[packet->q_idx];
+ if (out_channel == NULL)
+ out_channel = device->channel;
+ packet->channel = out_channel;
+
if (packet->page_buf_cnt) {
- ret = vmbus_sendpacket_pagebuffer(device->channel,
+ ret = vmbus_sendpacket_pagebuffer(out_channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
req_id);
} else {
- ret = vmbus_sendpacket(device->channel, &sendMessage,
+ ret = vmbus_sendpacket(out_channel, &sendMessage,
sizeof(struct nvsp_message),
req_id,
VM_PKT_DATA_INBAND,
@@ -548,17 +770,24 @@ int netvsc_send(struct hv_device *device,
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
- if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+ atomic_inc(&net_device->queue_sends[packet->q_idx]);
+
+ if (hv_ringbuf_avail_percent(&out_channel->outbound) <
RING_AVAIL_PERCENT_LOWATER) {
- netif_stop_queue(ndev);
+ netif_tx_stop_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
+
if (atomic_read(&net_device->
- num_outstanding_sends) < 1)
- netif_wake_queue(ndev);
+ queue_sends[packet->q_idx]) < 1)
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
}
} else if (ret == -EAGAIN) {
- netif_stop_queue(ndev);
- if (atomic_read(&net_device->num_outstanding_sends) < 1) {
- netif_wake_queue(ndev);
+ netif_tx_stop_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
+ if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
+ netif_tx_wake_queue(netdev_get_tx_queue(
+ ndev, packet->q_idx));
ret = -ENOSPC;
}
} else {
@@ -570,6 +799,7 @@ int netvsc_send(struct hv_device *device,
}
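
The send path keeps the old ring-buffer flow control, now applied per queue: the transmit queue is stopped when free ring space falls under RING_AVAIL_PERCENT_LOWATER, and the completion path wakes it once space climbs back over RING_AVAIL_PERCENT_HIWATER or the queue has fully drained. A compact sketch of that hysteresis, with invented threshold values (the driver's constants are defined earlier in this file, outside these hunks):

    #include <stdbool.h>
    #include <stdio.h>

    #define LOWATER 10  /* percent free: stop the queue below this */
    #define HIWATER 20  /* percent free: wake the queue above this */

    static bool stopped;

    /* Called from the transmit path after queuing a packet. */
    static void after_send(int avail_pct)
    {
        if (avail_pct < LOWATER && !stopped) {
            stopped = true;
            printf("%3d%% free -> stop queue\n", avail_pct);
        }
    }

    /* Called from the completion path as sends are retired. */
    static void on_completion(int avail_pct, int outstanding)
    {
        if (stopped && (avail_pct > HIWATER || outstanding < 1)) {
            stopped = false;
            printf("%3d%% free -> wake queue\n", avail_pct);
        }
    }

    int main(void)
    {
        after_send(8);        /* ring nearly full: stop */
        on_completion(15, 3); /* not enough headroom yet */
        on_completion(25, 2); /* back over high water: wake */
        return 0;
    }

The gap between the two thresholds keeps the queue from flapping when free space hovers around a single cut-off.
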
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct vmbus_channel *channel,
struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
@@ -587,7 +817,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
retry_send_cmplt:
/* Send the completion */
- ret = vmbus_sendpacket(device->channel, &recvcompMessage,
+ ret = vmbus_sendpacket(channel, &recvcompMessage,
sizeof(struct nvsp_message), transaction_id,
VM_PKT_COMP, 0);
if (ret == 0) {
@@ -613,76 +843,20 @@ retry_send_cmplt:
}
}
-/* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void netvsc_receive_completion(void *context)
-{
- struct hv_netvsc_packet *packet = context;
- struct hv_device *device = packet->device;
- struct netvsc_device *net_device;
- u64 transaction_id = 0;
- bool fsend_receive_comp = false;
- unsigned long flags;
- struct net_device *ndev;
- u32 status = NVSP_STAT_NONE;
-
- /*
- * Even though it seems logical to do a GetOutboundNetDevice() here to
- * send out receive completion, we are using GetInboundNetDevice()
- * since we may have disable outbound traffic already.
- */
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
- ndev = net_device->ndev;
-
- /* Overloading use of the lock. */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-
- if (packet->status != NVSP_STAT_SUCCESS)
- packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
-
- packet->xfer_page_pkt->count--;
-
- /*
- * Last one in the line that represent 1 xfer page packet.
- * Return the xfer page packet itself to the freelist
- */
- if (packet->xfer_page_pkt->count == 0) {
- fsend_receive_comp = true;
- transaction_id = packet->completion.recv.recv_completion_tid;
- status = packet->xfer_page_pkt->status;
- list_add_tail(&packet->xfer_page_pkt->list_ent,
- &net_device->recv_pkt_list);
-
- }
-
- /* Put the packet back */
- list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
- /* Send a receive completion for the xfer page packet */
- if (fsend_receive_comp)
- netvsc_send_recv_completion(device, net_device, transaction_id,
- status);
-
-}
-
static void netvsc_receive(struct netvsc_device *net_device,
+ struct vmbus_channel *channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
{
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
- struct hv_netvsc_packet *netvsc_packet = NULL;
- /* struct netvsc_driver *netvscDriver; */
- struct xferpage_packet *xferpage_packet = NULL;
+ struct hv_netvsc_packet nv_pkt;
+ struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+ u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- unsigned long flags;
struct net_device *ndev;
- LIST_HEAD(listHead);
-
ndev = net_device->ndev;
/*
@@ -715,77 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
return;
}
- /*
- * Grab free packets (range count + 1) to represent this xfer
- * page packet. +1 to represent the xfer page packet itself.
- * We grab it here so that we know exactly how many we can
- * fulfil
- */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
- while (!list_empty(&net_device->recv_pkt_list)) {
- list_move_tail(net_device->recv_pkt_list.next, &listHead);
- if (++count == vmxferpage_packet->range_cnt + 1)
- break;
- }
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
- /*
- * We need at least 2 netvsc pkts (1 to represent the xfer
- * page and at least 1 for the range) i.e. we can handled
- * some of the xfer page packet ranges...
- */
- if (count < 2) {
- netdev_err(ndev, "Got only %d netvsc pkt...needed "
- "%d pkts. Dropping this xfer page packet completely!\n",
- count, vmxferpage_packet->range_cnt + 1);
-
- /* Return it to the freelist */
- spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
- for (i = count; i != 0; i--) {
- list_move_tail(listHead.next,
- &net_device->recv_pkt_list);
- }
- spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
- flags);
-
- netvsc_send_recv_completion(device, net_device,
- vmxferpage_packet->d.trans_id,
- NVSP_STAT_FAIL);
-
- return;
- }
-
- /* Remove the 1st packet to represent the xfer page packet itself */
- xferpage_packet = (struct xferpage_packet *)listHead.next;
- list_del(&xferpage_packet->list_ent);
- xferpage_packet->status = NVSP_STAT_SUCCESS;
-
- /* This is how much we can satisfy */
- xferpage_packet->count = count - 1;
-
- if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
- netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
- "this xfer page...got %d\n",
- vmxferpage_packet->range_cnt, xferpage_packet->count);
- }
+ count = vmxferpage_packet->range_cnt;
+ netvsc_packet->device = device;
+ netvsc_packet->channel = channel;
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
- for (i = 0; i < (count - 1); i++) {
- netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
- list_del(&netvsc_packet->list_ent);
-
+ for (i = 0; i < count; i++) {
/* Initialize the netvsc packet */
netvsc_packet->status = NVSP_STAT_SUCCESS;
- netvsc_packet->xfer_page_pkt = xferpage_packet;
- netvsc_packet->completion.recv.recv_completion =
- netvsc_receive_completion;
- netvsc_packet->completion.recv.recv_completion_ctx =
- netvsc_packet;
- netvsc_packet->device = device;
- /* Save this so that we can send it back */
- netvsc_packet->completion.recv.recv_completion_tid =
- vmxferpage_packet->d.trans_id;
-
netvsc_packet->data = (void *)((unsigned long)net_device->
recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
@@ -794,16 +905,53 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
- netvsc_receive_completion(netvsc_packet->
- completion.recv.recv_completion_ctx);
+ if (netvsc_packet->status != NVSP_STAT_SUCCESS)
+ status = NVSP_STAT_FAIL;
+ }
+
+ netvsc_send_recv_completion(device, channel, net_device,
+ vmxferpage_packet->d.trans_id, status);
+}
+
+
+static void netvsc_send_table(struct hv_device *hdev,
+ struct vmpacket_descriptor *vmpkt)
+{
+ struct netvsc_device *nvscdev;
+ struct net_device *ndev;
+ struct nvsp_message *nvmsg;
+ int i;
+ u32 count, *tab;
+
+ nvscdev = get_outbound_net_device(hdev);
+ if (!nvscdev)
+ return;
+ ndev = nvscdev->ndev;
+
+ nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
+ (vmpkt->offset8 << 3));
+
+ if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
+ return;
+
+ count = nvmsg->msg.v5_msg.send_table.count;
+ if (count != VRSS_SEND_TAB_SIZE) {
+ netdev_err(ndev, "Received wrong send-table size:%u\n", count);
+ return;
}
+ tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
+ nvmsg->msg.v5_msg.send_table.offset);
+
+ for (i = 0; i < count; i++)
+ nvscdev->send_table[i] = tab[i];
}
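
netvsc_send_table() illustrates the NVSP convention for variable-length messages: the header carries an entry count plus a byte offset, relative to the start of the table body, at which the u32 entries live. Below is a self-contained sketch of building and then parsing such an offset-framed message; the struct and sizes are invented for illustration and do not reproduce the real nvsp layout.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct send_table_msg {
        uint32_t count;
        uint32_t offset;    /* bytes from the start of this header */
    };

    #define TAB_SIZE 4

    int main(void)
    {
        uint8_t buf[sizeof(struct send_table_msg) +
                    TAB_SIZE * sizeof(uint32_t)];
        struct send_table_msg hdr = {
            .count  = TAB_SIZE,
            .offset = sizeof(struct send_table_msg),
        };
        uint32_t entries[TAB_SIZE] = { 0, 1, 0, 1 };
        uint32_t tab[TAB_SIZE];
        struct send_table_msg rmsg;

        /* Sender side: header first, entries at the advertised offset. */
        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + hdr.offset, entries, sizeof(entries));

        /* Receiver side: validate the count before trusting the offset. */
        memcpy(&rmsg, buf, sizeof(rmsg));
        if (rmsg.count != TAB_SIZE) {
            fprintf(stderr, "wrong send-table size: %u\n", rmsg.count);
            return 1;
        }
        memcpy(tab, buf + rmsg.offset, rmsg.count * sizeof(uint32_t));

        for (uint32_t i = 0; i < rmsg.count; i++)
            printf("tab[%u] = %u\n", i, tab[i]);
        return 0;
    }
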
-static void netvsc_channel_cb(void *context)
+void netvsc_channel_cb(void *context)
{
int ret;
- struct hv_device *device = context;
+ struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ struct hv_device *device;
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
@@ -812,14 +960,19 @@ static void netvsc_channel_cb(void *context)
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
+ if (channel->primary_channel != NULL)
+ device = channel->primary_channel->device_obj;
+ else
+ device = channel->device_obj;
+
net_device = get_inbound_net_device(device);
if (!net_device)
return;
ndev = net_device->ndev;
- buffer = net_device->cb_buffer;
+ buffer = get_per_channel_state(channel);
do {
- ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
+ ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
&bytes_recvd, &request_id);
if (ret == 0) {
if (bytes_recvd > 0) {
@@ -831,8 +984,12 @@ static void netvsc_channel_cb(void *context)
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(net_device,
- device, desc);
+ netvsc_receive(net_device, channel,
+ device, desc);
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ netvsc_send_table(device, desc);
break;
default:
@@ -880,11 +1037,9 @@ static void netvsc_channel_cb(void *context)
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
int ret = 0;
- int i;
int ring_size =
((struct netvsc_device_info *)additional_info)->ring_size;
struct netvsc_device *net_device;
- struct hv_netvsc_packet *packet, *pos;
struct net_device *ndev;
net_device = alloc_net_device(device);
@@ -893,6 +1048,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
goto cleanup;
}
+ net_device->ring_size = ring_size;
+
/*
* Coming into this function, struct net_device * is
* registered as the driver private data.
@@ -903,24 +1060,14 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
ndev = net_device->ndev;
/* Initialize the NetVSC channel extension */
- spin_lock_init(&net_device->recv_pkt_list_lock);
-
- INIT_LIST_HEAD(&net_device->recv_pkt_list);
-
- for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
- packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
- if (!packet)
- break;
-
- list_add_tail(&packet->list_ent,
- &net_device->recv_pkt_list);
- }
init_completion(&net_device->channel_init_wait);
+ set_per_channel_state(device->channel, net_device->cb_buffer);
+
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
- netvsc_channel_cb, device);
+ netvsc_channel_cb, device->channel);
if (ret != 0) {
netdev_err(ndev, "unable to open channel: %d\n", ret);
@@ -930,6 +1077,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
/* Channel is opened */
pr_info("hv_netvsc channel opened successfully\n");
+ net_device->chn_table[0] = device->channel;
+
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
if (ret != 0) {
@@ -946,16 +1095,8 @@ close:
cleanup:
- if (net_device) {
- list_for_each_entry_safe(packet, pos,
- &net_device->recv_pkt_list,
- list_ent) {
- list_del(&packet->list_ent);
- kfree(packet);
- }
-
+ if (net_device)
kfree(net_device);
- }
return ret;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7918d5132c1f..4fd71b75e666 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -101,7 +101,7 @@ static int netvsc_open(struct net_device *net)
return ret;
}
- netif_start_queue(net);
+ netif_tx_start_all_queues(net);
nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
@@ -149,15 +149,100 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
+union sub_key {
+ u64 k;
+ struct {
+ u8 pad[3];
+ u8 kb;
+ u32 ka;
+ };
+};
+
+/* Toeplitz hash function
+ * data: network byte order
+ * return: host byte order
+ */
+static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
+{
+ union sub_key subk;
+ int k_next = 4;
+ u8 dt;
+ int i, j;
+ u32 ret = 0;
+
+ subk.k = 0;
+ subk.ka = ntohl(*(u32 *)key);
+
+ for (i = 0; i < dlen; i++) {
+ subk.kb = key[k_next];
+ k_next = (k_next + 1) % klen;
+ dt = data[i];
+ for (j = 0; j < 8; j++) {
+ if (dt & 0x80)
+ ret ^= subk.ka;
+ dt <<= 1;
+ subk.k <<= 1;
+ }
+ }
+
+ return ret;
+}
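
comp_hash() above is a bit-serial Toeplitz hash: a 32-bit window slides one bit per input bit along the key, and the current window is XORed into the result for every set input bit; the union merely overlays the next key byte directly below the window. The stand-alone re-implementation below drops the union trick in favor of an explicit 64-bit window. The 12-byte tuple is an arbitrary example (source/destination IPv4 address plus ports, network byte order, as the driver feeds it), and no particular output value is claimed.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t toeplitz_hash(const uint8_t *key, int klen,
                                  const uint8_t *data, int dlen)
    {
        /* bits 63..32: current key window; bits 31..24: next key byte */
        uint64_t window = (uint64_t)((uint32_t)key[0] << 24 |
                                     key[1] << 16 | key[2] << 8 |
                                     key[3]) << 32;
        uint32_t ret = 0;
        int k_next = 4;

        for (int i = 0; i < dlen; i++) {
            window |= (uint64_t)key[k_next] << 24;
            k_next = (k_next + 1) % klen;
            for (uint8_t dt = data[i], j = 0; j < 8; j++, dt <<= 1) {
                if (dt & 0x80)
                    ret ^= (uint32_t)(window >> 32);
                window <<= 1;
            }
        }
        return ret;
    }

    int main(void)
    {
        static const uint8_t key[40] = {
            0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
            0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
            0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
            0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
            0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
        };
        /* saddr, daddr, sport, dport -- arbitrary example tuple */
        const uint8_t tuple[12] = { 192, 168, 0, 1, 10, 0, 0, 2,
                                    0x1f, 0x90, 0x00, 0x50 };

        printf("hash = 0x%08x\n",
               toeplitz_hash(key, sizeof(key), tuple, sizeof(tuple)));
        return 0;
    }
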
+
+static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
+{
+ struct iphdr *iphdr;
+ int data_len;
+ bool ret = false;
+
+ if (eth_hdr(skb)->h_proto != htons(ETH_P_IP))
+ return false;
+
+ iphdr = ip_hdr(skb);
+
+ if (iphdr->version == 4) {
+ if (iphdr->protocol == IPPROTO_TCP)
+ data_len = 12;
+ else
+ data_len = 8;
+ *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
+ (u8 *)&iphdr->saddr, data_len);
+ ret = true;
+ }
+
+ return ret;
+}
+
+static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct hv_device *hdev = net_device_ctx->device_ctx;
+ struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+ u32 hash;
+ u16 q_idx = 0;
+
+ if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
+ return 0;
+
+ if (netvsc_set_hash(&hash, skb)) {
+ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+ ndev->real_num_tx_queues;
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
+ }
+
+ return q_idx;
+}
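
Queue selection is therefore two modular reductions: the L3 hash indexes the host-supplied send_table, and the table entry is folded into the device's real transmit queue count. The toy below assumes VRSS_SEND_TAB_SIZE is 16 (it is defined in hyperv_net.h, which this diff does not show) and uses arbitrary hash values.

    #include <stdint.h>
    #include <stdio.h>

    #define SEND_TAB_SIZE 16   /* assumed VRSS_SEND_TAB_SIZE */

    int main(void)
    {
        uint32_t send_table[SEND_TAB_SIZE];
        unsigned int real_num_tx_queues = 4;
        uint32_t hashes[] = { 0x0badcafe, 0x00000007, 0xffff1234 };

        /* The host may spread entries round-robin, as the RSS setup does. */
        for (int i = 0; i < SEND_TAB_SIZE; i++)
            send_table[i] = i % real_num_tx_queues;

        for (size_t i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
            printf("hash 0x%08x -> queue %u\n", hashes[i],
                   send_table[hashes[i] % SEND_TAB_SIZE] %
                   real_num_tx_queues);
        return 0;
    }
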
+
static void netvsc_xmit_completion(void *context)
{
struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
struct sk_buff *skb = (struct sk_buff *)
- (unsigned long)packet->completion.send.send_completion_tid;
+ (unsigned long)packet->send_completion_tid;
+ u32 index = packet->send_buf_index;
kfree(packet);
- if (skb)
+ if (skb && (index == NETVSC_INVALID_INDEX))
dev_kfree_skb_any(skb);
}
@@ -301,6 +386,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_lso_info *lso_info;
int hdr_offset;
u32 net_trans_info;
+ u32 hash;
/* We will at most need two pages to describe the rndis
@@ -319,9 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_data_pgs * sizeof(struct hv_page_buffer)) +
sizeof(struct rndis_message) +
- NDIS_VLAN_PPI_SIZE +
- NDIS_CSUM_PPI_SIZE +
- NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
+ NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
+ NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
netdev_err(net, "unable to allocate hv_netvsc_packet\n");
@@ -333,6 +418,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->vlan_tci = skb->vlan_tci;
+ packet->q_idx = skb_get_queue_mapping(skb);
+
packet->is_data_pkt = true;
packet->total_data_buflen = skb->len;
@@ -341,9 +428,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
(num_data_pgs * sizeof(struct hv_page_buffer)));
/* Set the completion routine */
- packet->completion.send.send_completion = netvsc_xmit_completion;
- packet->completion.send.send_completion_ctx = packet;
- packet->completion.send.send_completion_tid = (unsigned long)skb;
+ packet->send_completion = netvsc_xmit_completion;
+ packet->send_completion_ctx = packet;
+ packet->send_completion_tid = (unsigned long)skb;
isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
@@ -358,6 +445,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+ hash = skb_get_hash_raw(skb);
+ if (hash != 0 && net->real_num_tx_queues > 1) {
+ rndis_msg_size += NDIS_HASH_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+ NBL_HASH_VALUE);
+ *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+ }
+
if (isvlan) {
struct ndis_pkt_8021q_info *vlan;
@@ -558,6 +653,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
packet->vlan_tci);
+ skb_record_rx_queue(skb, packet->channel->
+ offermsg.offer.sub_channel_index);
+
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
@@ -606,7 +704,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
rndis_filter_device_add(hdev, &device_info);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
return 0;
}
@@ -652,6 +750,7 @@ static const struct net_device_ops device_ops = {
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
+ .ndo_select_queue = netvsc_select_queue,
};
/*
@@ -698,9 +797,11 @@ static int netvsc_probe(struct hv_device *dev,
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
struct netvsc_device_info device_info;
+ struct netvsc_device *nvdev;
int ret;
- net = alloc_etherdev(sizeof(struct net_device_context));
+ net = alloc_etherdev_mq(sizeof(struct net_device_context),
+ num_online_cpus());
if (!net)
return -ENOMEM;
@@ -719,7 +820,7 @@ static int netvsc_probe(struct hv_device *dev,
net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_TSO;
- SET_ETHTOOL_OPS(net, &ethtool_ops);
+ net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
/* Notify the netvsc driver of the new device */
@@ -733,6 +834,10 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ nvdev = hv_get_drvdata(dev);
+ netif_set_real_num_tx_queues(net, nvdev->num_chn);
+ netif_set_real_num_rx_queues(net, nvdev->num_chn);
+
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 143a98caf618..99c527adae5b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -31,7 +31,7 @@
#include "hyperv_net.h"
-#define RNDIS_EXT_LEN 100
+#define RNDIS_EXT_LEN PAGE_SIZE
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -94,6 +94,8 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
rndis_msg->ndis_msg_type = msg_type;
rndis_msg->msg_len = msg_len;
+ request->pkt.q_idx = 0;
+
/*
* Set the request id. This field is always after the rndis header for
* request/response packet types so we just used the SetRequest as a
@@ -234,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
- packet->completion.send.send_completion = NULL;
+ packet->send_completion = NULL;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -399,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
pkt->total_data_buflen = rndis_pkt->data_len;
pkt->data = (void *)((unsigned long)pkt->data + data_offset);
- pkt->is_data_pkt = true;
-
vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
if (vlan) {
pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
@@ -509,6 +509,19 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->info_buflen = 0;
query->dev_vc_handle = 0;
+ if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+ struct ndis_recv_scale_cap *cap;
+
+ request->request_msg.msg_len +=
+ sizeof(struct ndis_recv_scale_cap);
+ query->info_buflen = sizeof(struct ndis_recv_scale_cap);
+ cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
+ query->info_buf_offset);
+ cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
+ cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
+ cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
+ }
+
ret = rndis_filter_send_request(dev, request);
if (ret != 0)
goto cleanup;
@@ -695,6 +708,89 @@ cleanup:
return ret;
}
+u8 netvsc_hash_key[HASH_KEYLEN] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
+int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+{
+ struct net_device *ndev = rdev->net_dev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_set_complete *set_complete;
+ u32 extlen = sizeof(struct ndis_recv_scale_param) +
+ 4*ITAB_NUM + HASH_KEYLEN;
+ struct ndis_recv_scale_param *rssp;
+ u32 *itab;
+ u8 *keyp;
+ int i, t, ret;
+
+ request = get_rndis_request(
+ rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ rssp = (struct ndis_recv_scale_param *)(set + 1);
+ rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
+ rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
+ rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
+ rssp->flag = 0;
+ rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
+ NDIS_HASH_TCP_IPV4;
+ rssp->indirect_tabsize = 4*ITAB_NUM;
+ rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
+ rssp->hashkey_size = HASH_KEYLEN;
+ rssp->kashkey_offset = rssp->indirect_taboffset +
+ rssp->indirect_tabsize;
+
+ /* Set indirection table entries */
+ itab = (u32 *)(rssp + 1);
+ for (i = 0; i < ITAB_NUM; i++)
+ itab[i] = i % num_queue;
+
+ /* Set hash key values */
+ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
+ for (i = 0; i < HASH_KEYLEN; i++)
+ keyp[i] = netvsc_hash_key[i];
+
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ netdev_err(ndev, "timeout before we got a set response...\n");
+ /* can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -ETIMEDOUT;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
+ }
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
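
rndis_filter_set_rss_param() packs three variably-placed pieces into one contiguous info buffer: the scale-parameter header, the ITAB_NUM-entry indirection table, and the 40-byte Toeplitz key, each located by a byte offset from the header (the driver's struct field really is spelled kashkey_offset). A simplified sketch of the same packing, with invented structures and ITAB_NUM assumed to be 128 as in this series' header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ITAB_NUM    128   /* assumed, from hyperv_net.h */
    #define HASH_KEYLEN 40

    struct rss_param {        /* stand-in for ndis_recv_scale_param */
        uint32_t indirect_tabsize;
        uint32_t indirect_taboffset;  /* bytes from start of header */
        uint16_t hashkey_size;
        uint32_t hashkey_offset;
    };

    int main(void)
    {
        uint8_t blob[sizeof(struct rss_param) +
                     ITAB_NUM * sizeof(uint32_t) + HASH_KEYLEN];
        struct rss_param p = {
            .indirect_tabsize   = ITAB_NUM * sizeof(uint32_t),
            .indirect_taboffset = sizeof(struct rss_param),
            .hashkey_size       = HASH_KEYLEN,
        };
        uint32_t itab[ITAB_NUM];
        uint8_t key[HASH_KEYLEN] = { 0x6d, 0x5a };  /* truncated example */
        int num_queue = 4;

        p.hashkey_offset = p.indirect_taboffset + p.indirect_tabsize;

        for (int i = 0; i < ITAB_NUM; i++)  /* round-robin spread */
            itab[i] = i % num_queue;

        memcpy(blob, &p, sizeof(p));
        memcpy(blob + p.indirect_taboffset, itab, sizeof(itab));
        memcpy(blob + p.hashkey_offset, key, sizeof(key));

        printf("blob: %zu bytes, key at offset %u\n",
               sizeof(blob), p.hashkey_offset);
        return 0;
    }
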
+
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
@@ -886,6 +982,28 @@ static int rndis_filter_close_device(struct rndis_device *dev)
return ret;
}
+static void netvsc_sc_open(struct vmbus_channel *new_sc)
+{
+ struct netvsc_device *nvscdev;
+ u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
+ int ret;
+
+ nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
+
+ if (chn_index >= nvscdev->num_chn)
+ return;
+
+ set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
+ NETVSC_PACKET_SIZE);
+
+ ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
+ nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+ netvsc_channel_cb, new_sc);
+
+ if (ret == 0)
+ nvscdev->chn_table[chn_index] = new_sc;
+}
+
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
@@ -894,6 +1012,10 @@ int rndis_filter_device_add(struct hv_device *dev,
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
+ struct nvsp_message *init_packet;
+ int t;
+ struct ndis_recv_scale_cap rsscap;
+ u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -913,6 +1035,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Initialize the rndis device */
net_device = hv_get_drvdata(dev);
+ net_device->num_chn = 1;
net_device->extension = rndis_device;
rndis_device->net_dev = net_device;
@@ -952,7 +1075,6 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto err_dev_remv;
-
rndis_filter_query_device_link_status(rndis_device);
device_info->link_state = rndis_device->link_state;
@@ -961,7 +1083,66 @@ int rndis_filter_device_add(struct hv_device *dev,
rndis_device->hw_mac_adr,
device_info->link_state ? "down" : "up");
- return ret;
+ if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
+ return 0;
+
+ /* vRSS setup */
+ memset(&rsscap, 0, rsscap_size);
+ ret = rndis_filter_query_device(rndis_device,
+ OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &rsscap, &rsscap_size);
+ if (ret || rsscap.num_recv_que < 2)
+ goto out;
+
+ net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
+ num_online_cpus() : rsscap.num_recv_que;
+ if (net_device->num_chn == 1)
+ goto out;
+
+ net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
+ NETVSC_PACKET_SIZE);
+ if (!net_device->sub_cb_buf) {
+ net_device->num_chn = 1;
+ dev_info(&dev->device, "No memory for subchannels.\n");
+ goto out;
+ }
+
+ vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
+
+ init_packet = &net_device->channel_init_pkt;
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
+ init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
+ init_packet->msg.v5_msg.subchn_req.num_subchannels =
+ net_device->num_chn - 1;
+ ret = vmbus_sendpacket(dev->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto out;
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+ if (t == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ if (init_packet->msg.v5_msg.subchn_comp.status !=
+ NVSP_STAT_SUCCESS) {
+ ret = -ENODEV;
+ goto out;
+ }
+ net_device->num_chn = 1 +
+ init_packet->msg.v5_msg.subchn_comp.num_subchannels;
+
+ vmbus_are_subchannels_present(dev->channel);
+
+ ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+
+out:
+ if (ret)
+ net_device->num_chn = 1;
+ return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
rndis_filter_device_remove(dev);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index e36f194673a4..4517b149ed07 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc < 0)
goto err_rx;
- rc = at86rf230_start(dev);
-
- return rc;
-
+ return at86rf230_start(dev);
err_rx:
at86rf230_start(dev);
err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
return at86rf230_isr(irq, data);
}
-static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
-{
- return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
-}
-
static int at86rf230_hw_init(struct at86rf230_local *lp)
{
- struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
- int rc, irq_pol;
- u8 status;
+ int rc, irq_pol, irq_type;
+ u8 dvdd;
u8 csma_seed[2];
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
-
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
if (rc)
return rc;
+ irq_type = irq_get_trigger_type(lp->spi->irq);
/* configure irq polarity, defaults to high active */
- if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+ if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
irq_pol = IRQ_ACTIVE_LOW;
else
irq_pol = IRQ_ACTIVE_HIGH;
- rc = at86rf230_irq_polarity(lp, irq_pol);
+ rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
if (rc)
return rc;
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
/* Wait the next SLEEP cycle */
msleep(100);
- rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
if (rc)
return rc;
- if (!status) {
+ if (!dvdd) {
dev_err(&lp->spi->dev, "DVDD error\n");
return -EINVAL;
}
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
at86rf230_get_pdata(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
- const char *irq_type;
if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
- pdata->irq_type = IRQF_TRIGGER_RISING;
- of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
- if (!strcmp(irq_type, "level-high"))
- pdata->irq_type = IRQF_TRIGGER_HIGH;
- else if (!strcmp(irq_type, "level-low"))
- pdata->irq_type = IRQF_TRIGGER_LOW;
- else if (!strcmp(irq_type, "edge-rising"))
- pdata->irq_type = IRQF_TRIGGER_RISING;
- else if (!strcmp(irq_type, "edge-falling"))
- pdata->irq_type = IRQF_TRIGGER_FALLING;
- else
- dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
-
spi->dev.platform_data = pdata;
done:
return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc;
+ int rc, irq_type;
const char *chip;
struct ieee802154_ops *ops = NULL;
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
}
if (gpio_is_valid(pdata->rstn)) {
- rc = gpio_request(pdata->rstn, "rstn");
+ rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+ GPIOF_OUT_INIT_HIGH, "rstn");
if (rc)
return rc;
}
if (gpio_is_valid(pdata->slp_tr)) {
- rc = gpio_request(pdata->slp_tr, "slp_tr");
- if (rc)
- goto err_slp_tr;
- }
-
- if (gpio_is_valid(pdata->rstn)) {
- rc = gpio_direction_output(pdata->rstn, 1);
- if (rc)
- goto err_gpio_dir;
- }
-
- if (gpio_is_valid(pdata->slp_tr)) {
- rc = gpio_direction_output(pdata->slp_tr, 0);
+ rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+ GPIOF_OUT_INIT_LOW, "slp_tr");
if (rc)
- goto err_gpio_dir;
+ return rc;
}
/* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
if (rc < 0)
- goto err_gpio_dir;
+ return rc;
if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
man_id >> 8, man_id & 0xFF);
- rc = -EINVAL;
- goto err_gpio_dir;
+ return -EINVAL;
}
switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
}
dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
- if (!ops) {
- rc = -ENOTSUPP;
- goto err_gpio_dir;
- }
+ if (!ops)
+ return -ENOTSUPP;
dev = ieee802154_alloc_device(sizeof(*lp), ops);
- if (!dev) {
- rc = -ENOMEM;
- goto err_gpio_dir;
- }
+ if (!dev)
+ return -ENOMEM;
lp = dev->priv;
lp->dev = dev;
@@ -1176,7 +1136,8 @@ static int at86rf230_probe(struct spi_device *spi)
dev->extra_tx_headroom = 0;
dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
irq_worker = at86rf230_irqwork;
irq_handler = at86rf230_isr;
} else {
@@ -1202,75 +1163,65 @@ static int at86rf230_probe(struct spi_device *spi)
if (rc)
goto err_hw_init;
- rc = request_irq(spi->irq, irq_handler,
- IRQF_SHARED | pdata->irq_type,
- dev_name(&spi->dev), lp);
+ /* Read irq status register to reset irq line */
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
if (rc)
goto err_hw_init;
- /* Read irq status register to reset irq line */
- rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
+ rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
+ dev_name(&spi->dev), lp);
if (rc)
- goto err_irq;
+ goto err_hw_init;
rc = ieee802154_register_device(lp->dev);
if (rc)
- goto err_irq;
+ goto err_hw_init;
return rc;
-err_irq:
- free_irq(spi->irq, lp);
err_hw_init:
flush_work(&lp->irqwork);
- spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
-err_gpio_dir:
- if (gpio_is_valid(pdata->slp_tr))
- gpio_free(pdata->slp_tr);
-err_slp_tr:
- if (gpio_is_valid(pdata->rstn))
- gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
/* mask all at86rf230 irq's */
at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
ieee802154_unregister_device(lp->dev);
-
- free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
-
- if (gpio_is_valid(pdata->slp_tr))
- gpio_free(pdata->slp_tr);
- if (gpio_is_valid(pdata->rstn))
- gpio_free(pdata->rstn);
-
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
-
dev_dbg(&spi->dev, "unregistered at86rf230\n");
+
return 0;
}
-#if IS_ENABLED(CONFIG_OF)
-static struct of_device_id at86rf230_of_match[] = {
+static const struct of_device_id at86rf230_of_match[] = {
{ .compatible = "atmel,at86rf230", },
{ .compatible = "atmel,at86rf231", },
{ .compatible = "atmel,at86rf233", },
{ .compatible = "atmel,at86rf212", },
{ },
};
-#endif
+MODULE_DEVICE_TABLE(of, at86rf230_of_match);
+
+static const struct spi_device_id at86rf230_device_id[] = {
+ { .name = "at86rf230", },
+ { .name = "at86rf231", },
+ { .name = "at86rf233", },
+ { .name = "at86rf212", },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
static struct spi_driver at86rf230_driver = {
+ .id_table = at86rf230_device_id,
.driver = {
.of_match_table = of_match_ptr(at86rf230_of_match),
.name = "at86rf230",
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index b8d22173925d..27d83207d24c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -26,6 +26,7 @@
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include <linux/device.h>
#include <linux/spinlock.h>
#include <net/mac802154.h>
#include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
int err = -ENOMEM;
int i;
- priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
+ GFP_KERNEL);
if (!priv)
goto err_alloc;
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
err_slave:
list_for_each_entry(dp, &priv->list, list)
fakelb_del(dp);
- kfree(priv);
err_alloc:
return err;
}
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
list_for_each_entry_safe(dp, temp, &priv->list, list)
fakelb_del(dp);
- kfree(priv);
return 0;
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 78a6552ed707..4048062011ba 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -618,12 +618,12 @@ static int mrf24j40_probe(struct spi_device *spi)
printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
- devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL);
+ devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL);
if (!devrec)
- goto err_devrec;
- devrec->buf = kzalloc(3, GFP_KERNEL);
+ goto err_ret;
+ devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL);
if (!devrec->buf)
- goto err_buf;
+ goto err_ret;
spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
@@ -638,7 +638,7 @@ static int mrf24j40_probe(struct spi_device *spi)
devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops);
if (!devrec->dev)
- goto err_alloc_dev;
+ goto err_ret;
devrec->dev->priv = devrec;
devrec->dev->parent = &devrec->spi->dev;
@@ -676,12 +676,13 @@ static int mrf24j40_probe(struct spi_device *spi)
val &= ~0x3; /* Clear RX mode (normal) */
write_short_reg(devrec, REG_RXMCR, val);
- ret = request_threaded_irq(spi->irq,
- NULL,
- mrf24j40_isr,
- IRQF_TRIGGER_LOW|IRQF_ONESHOT,
- dev_name(&spi->dev),
- devrec);
+ ret = devm_request_threaded_irq(&spi->dev,
+ spi->irq,
+ NULL,
+ mrf24j40_isr,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ dev_name(&spi->dev),
+ devrec);
if (ret) {
dev_err(printdev(devrec), "Unable to get IRQ");
@@ -695,11 +696,7 @@ err_read_reg:
ieee802154_unregister_device(devrec->dev);
err_register_device:
ieee802154_free_device(devrec->dev);
-err_alloc_dev:
- kfree(devrec->buf);
-err_buf:
- kfree(devrec);
-err_devrec:
+err_ret:
return ret;
}
@@ -709,15 +706,11 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
- free_irq(spi->irq, devrec);
ieee802154_unregister_device(devrec->dev);
ieee802154_free_device(devrec->dev);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
* complete? */
- /* Clean up the SPI stuff. */
- kfree(devrec->buf);
- kfree(devrec);
return 0;
}
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 3da44d5d9149..8d101d63abca 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -396,7 +396,8 @@ config MCS_FIR
config SH_IRDA
tristate "SuperH IrDA driver"
- depends on IRDA && ARCH_SHMOBILE
+ depends on IRDA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want to enable SuperH IrDA devices.
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 2900af091c2d..998bb89ede71 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -510,10 +510,8 @@ static void via_hw_init(struct via_ircc_cb *self)
*/
static int via_ircc_read_dongle_id(int iobase)
{
- int dongle_id = 9; /* Default to IBM */
-
IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
- return dongle_id;
+ return 9; /* Default to IBM */
}
/*
@@ -926,7 +924,6 @@ static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
int iobase;
- int ret = TRUE;
u8 Tx_status;
IRDA_DEBUG(3, "%s()\n", __func__);
@@ -983,7 +980,7 @@ F01_E*/
// Tell the network layer, that we can accept more frames
netif_wake_queue(self->netdev);
//F01 }
- return ret;
+ return TRUE;
}
/*
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index e641bb240362..11dbdf36d9c1 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -62,10 +62,6 @@
#include "w83977af.h"
#include "w83977af_ir.h"
-#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
-#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
-#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
-#endif
#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
w977_write_reg(0x74, dma+1, efbase[i]);
#else
w977_write_reg(0x74, dma, efbase[i]);
-#endif /*CONFIG_ARCH_NETWINDER */
+#endif /* CONFIG_ARCH_NETWINDER */
w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
/* Set append hardware CRC, enable IR bank selection */
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- unsigned long flags;
- __u8 hcr;
-#endif
IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
/* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
/* Choose transmit DMA channel */
switch_bank(iobase, SET2);
outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- spin_lock_irqsave(&self->lock, flags);
-
- disable_dma(self->io.dma);
- clear_dma_ff(self->io.dma);
- set_dma_mode(self->io.dma, DMA_MODE_READ);
- set_dma_addr(self->io.dma, self->tx_buff_dma);
- set_dma_count(self->io.dma, self->tx_buff.len);
-#else
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
-#endif
self->io.direction = IO_XMIT;
/* Enable DMA */
switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
- hcr = inb(iobase+HCR);
- outb(hcr | HCR_EN_DMA, iobase+HCR);
- enable_dma(self->io.dma);
- spin_unlock_irqrestore(&self->lock, flags);
-#else
outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
-#endif
/* Restore set register */
outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
{
int iobase;
__u8 set;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
unsigned long flags;
__u8 hcr;
#endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
/* Enable DMA */
switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d53e299ae1d9..958df383068a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -30,8 +30,10 @@
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <linux/hash.h>
+#include <linux/workqueue.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
+#include <linux/netpoll.h>
#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -40,10 +42,19 @@ struct macvlan_port {
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct rcu_head rcu;
+ struct sk_buff_head bc_queue;
+ struct work_struct bc_work;
bool passthru;
- int count;
};
+#define MACVLAN_PORT_IS_EMPTY(port) list_empty(&(port)->vlans)
+
+struct macvlan_skb_cb {
+ const struct macvlan_dev *src;
+};
+
+#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
+
static void macvlan_port_destroy(struct net_device *dev);
static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@ -120,7 +131,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
struct net_device *dev = vlan->dev;
if (local)
- return dev_forward_skb(dev, skb);
+ return __dev_forward_skb(dev, skb);
skb->dev = dev;
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@ -128,7 +139,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return 0;
}
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@ -175,32 +186,32 @@ static void macvlan_broadcast(struct sk_buff *skb,
if (likely(nskb))
err = macvlan_broadcast_one(
nskb, vlan, eth,
- mode == MACVLAN_MODE_BRIDGE);
+ mode == MACVLAN_MODE_BRIDGE) ?:
+ netif_rx_ni(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
}
}
}
-/* called under rcu_read_lock() from netif_receive_skb */
-static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+static void macvlan_process_broadcast(struct work_struct *w)
{
- struct macvlan_port *port;
- struct sk_buff *skb = *pskb;
- const struct ethhdr *eth = eth_hdr(skb);
- const struct macvlan_dev *vlan;
- const struct macvlan_dev *src;
- struct net_device *dev;
- unsigned int len = 0;
- int ret = NET_RX_DROP;
+ struct macvlan_port *port = container_of(w, struct macvlan_port,
+ bc_work);
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+
+ skb_queue_head_init(&list);
+
+ spin_lock_bh(&port->bc_queue.lock);
+ skb_queue_splice_tail_init(&port->bc_queue, &list);
+ spin_unlock_bh(&port->bc_queue.lock);
+
+ while ((skb = __skb_dequeue(&list))) {
+ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+ rcu_read_lock();
- port = macvlan_port_get_rcu(skb->dev);
- if (is_multicast_ether_addr(eth->h_dest)) {
- skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
- if (!skb)
- return RX_HANDLER_CONSUMED;
- eth = eth_hdr(skb);
- src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */
macvlan_broadcast(skb, port, NULL,
@@ -213,20 +224,80 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA |
MACVLAN_MODE_BRIDGE);
- else if (src->mode == MACVLAN_MODE_BRIDGE)
+ else
/*
* flood only to VEPA ports, bridge ports
* already saw the frame on the way out.
*/
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA);
- else {
+
+ rcu_read_unlock();
+
+ kfree_skb(skb);
+ }
+}
+
+static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int err = -ENOMEM;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto err;
+
+ spin_lock(&port->bc_queue.lock);
+ if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+ __skb_queue_tail(&port->bc_queue, nskb);
+ err = 0;
+ }
+ spin_unlock(&port->bc_queue.lock);
+
+ if (err)
+ goto free_nskb;
+
+ schedule_work(&port->bc_work);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+err:
+ atomic_long_inc(&skb->dev->rx_dropped);
+}
+
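
The enqueue helper above moves multicast flooding off the receive hot path: macvlan_handle_frame() now only clones the frame and appends it to a per-port queue bounded by the device's tx_queue_len, while the bc_work item splices the whole queue out under the lock and floods at leisure. A user-space analog of the pattern, with a pthread and condition variable standing in for the kernel workqueue; every name here is invented.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define QUEUE_LIMIT 8

    struct pkt {
        struct pkt *next;
        char data[32];
    };

    static struct pkt *queue_head, **queue_tail = &queue_head;
    static int queue_len, done;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;

    /* Hot path: clone and enqueue, dropping when the queue is full. */
    static void broadcast_enqueue(const char *data)
    {
        struct pkt *clone = malloc(sizeof(*clone));

        if (!clone)
            return;                     /* count as rx_dropped */
        snprintf(clone->data, sizeof(clone->data), "%s", data);
        clone->next = NULL;

        pthread_mutex_lock(&lock);
        if (queue_len < QUEUE_LIMIT) {
            *queue_tail = clone;
            queue_tail = &clone->next;
            queue_len++;
            clone = NULL;
            pthread_cond_signal(&kick); /* schedule_work() analog */
        }
        pthread_mutex_unlock(&lock);

        free(clone);                    /* non-NULL only if queue was full */
    }

    /* Worker: splice the queue out, then deliver without the lock held. */
    static void *broadcast_work(void *arg)
    {
        (void)arg;
        for (;;) {
            struct pkt *list, *p;

            pthread_mutex_lock(&lock);
            while (!queue_head && !done)
                pthread_cond_wait(&kick, &lock);
            list = queue_head;
            queue_head = NULL;
            queue_tail = &queue_head;
            queue_len = 0;
            pthread_mutex_unlock(&lock);

            if (!list && done)
                return NULL;
            while ((p = list) != NULL) {
                list = p->next;
                printf("delivering broadcast: %s\n", p->data);
                free(p);
            }
        }
    }

    int main(void)
    {
        pthread_t worker;

        pthread_create(&worker, NULL, broadcast_work, NULL);
        broadcast_enqueue("frame 1");
        broadcast_enqueue("frame 2");

        pthread_mutex_lock(&lock);
        done = 1;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
        pthread_join(worker, NULL);
        return 0;
    }
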
+/* called under rcu_read_lock() from netif_receive_skb */
+static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+{
+ struct macvlan_port *port;
+ struct sk_buff *skb = *pskb;
+ const struct ethhdr *eth = eth_hdr(skb);
+ const struct macvlan_dev *vlan;
+ const struct macvlan_dev *src;
+ struct net_device *dev;
+ unsigned int len = 0;
+ int ret = NET_RX_DROP;
+
+ port = macvlan_port_get_rcu(skb->dev);
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+ eth = eth_hdr(skb);
+ src = macvlan_hash_lookup(port, eth->h_source);
+ if (src && src->mode != MACVLAN_MODE_VEPA &&
+ src->mode != MACVLAN_MODE_BRIDGE) {
/* forward to original port. */
vlan = src;
- ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+ ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
+ netif_rx(skb);
goto out;
}
+ MACVLAN_SKB_CB(skb)->src = src;
+ macvlan_broadcast_enqueue(port, skb);
+
return RX_HANDLER_PASS;
}
@@ -287,12 +358,26 @@ xmit_world:
return dev_queue_xmit(skb);
}
+static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (vlan->netpoll)
+ netpoll_send_skb(vlan->netpoll, skb);
+#else
+ BUG();
+#endif
+ return NETDEV_TX_OK;
+}
+
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
unsigned int len = skb->len;
int ret;
- const struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ if (unlikely(netpoll_tx_running(dev)))
+ return macvlan_netpoll_send_skb(vlan, skb);
if (vlan->fwd_priv) {
skb->dev = vlan->lowerdev;
@@ -424,35 +509,49 @@ hash_del:
return 0;
}
-static int macvlan_set_mac_address(struct net_device *dev, void *p)
+static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- struct sockaddr *addr = p;
int err;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, addr);
} else {
/* Rehash and update the device filters */
- if (macvlan_addr_busy(vlan->port, addr->sa_data))
+ if (macvlan_addr_busy(vlan->port, addr))
return -EBUSY;
- err = dev_uc_add(lowerdev, addr->sa_data);
- if (err)
- return err;
+ if (!vlan->port->passthru) {
+ err = dev_uc_add(lowerdev, addr);
+ if (err)
+ return err;
- dev_uc_del(lowerdev, dev->dev_addr);
+ dev_uc_del(lowerdev, dev->dev_addr);
+ }
- macvlan_hash_change_addr(vlan, addr->sa_data);
+ macvlan_hash_change_addr(vlan, addr);
}
return 0;
}
+static int macvlan_set_mac_address(struct net_device *dev, void *p)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+ dev_set_mac_address(vlan->lowerdev, addr);
+ return 0;
+ }
+
+ return macvlan_sync_address(dev, addr->sa_data);
+}
+
static void macvlan_change_rx_flags(struct net_device *dev, int change)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -567,8 +666,7 @@ static void macvlan_uninit(struct net_device *dev)
free_percpu(vlan->pcpu_stats);
- port->count -= 1;
- if (!port->count)
+ if (MACVLAN_PORT_IS_EMPTY(port))
macvlan_port_destroy(port->dev);
}
@@ -705,6 +803,50 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
return features;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void macvlan_dev_poll_controller(struct net_device *dev)
+{
+ return;
+}
+
+static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *real_dev = vlan->lowerdev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!netpoll)
+ goto out;
+
+ err = __netpoll_setup(netpoll, real_dev);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ vlan->netpoll = netpoll;
+
+out:
+ return err;
+}
+
+static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct netpoll *netpoll = vlan->netpoll;
+
+ if (!netpoll)
+ return;
+
+ vlan->netpoll = NULL;
+
+ __netpoll_free_async(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = macvlan_ethtool_get_settings,
@@ -730,6 +872,11 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
.ndo_get_lock_subclass = macvlan_get_nest_level,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = macvlan_dev_poll_controller,
+ .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
+ .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
+#endif
};
void macvlan_common_setup(struct net_device *dev)
@@ -770,6 +917,9 @@ static int macvlan_port_create(struct net_device *dev)
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_hash[i]);
+ skb_queue_head_init(&port->bc_queue);
+ INIT_WORK(&port->bc_work, macvlan_process_broadcast);
+
err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
if (err)
kfree(port);
@@ -782,6 +932,7 @@ static void macvlan_port_destroy(struct net_device *dev)
{
struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ cancel_work_sync(&port->bc_work);
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
kfree_rcu(port, rcu);
@@ -868,13 +1019,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
- if (port->count)
+ if (!MACVLAN_PORT_IS_EMPTY(port))
return -EINVAL;
port->passthru = true;
eth_hw_addr_inherit(dev, lowerdev);
}
- port->count += 1;
err = register_netdevice(dev);
if (err < 0)
goto destroy_port;
@@ -892,8 +1042,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
unregister_netdev:
unregister_netdevice(dev);
destroy_port:
- port->count -= 1;
- if (!port->count)
+ if (MACVLAN_PORT_IS_EMPTY(port))
macvlan_port_destroy(lowerdev);
return err;
@@ -1028,6 +1177,25 @@ static int macvlan_device_event(struct notifier_block *unused,
netdev_update_features(vlan->dev);
}
break;
+ case NETDEV_CHANGEMTU:
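+		/* Clamp each macvlan's MTU so it never exceeds the new MTU
+		 * of the underlying device.
+		 */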
+ list_for_each_entry(vlan, &port->vlans, list) {
+ if (vlan->dev->mtu <= dev->mtu)
+ continue;
+ dev_set_mtu(vlan->dev, dev->mtu);
+ }
+ break;
+ case NETDEV_CHANGEADDR:
+ if (!port->passthru)
+ return NOTIFY_DONE;
+
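+		/* In passthru mode there is at most one macvlan on the port,
+		 * so syncing the first (and only) entry mirrors the lower
+		 * device's new address onto it.
+		 */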
+ vlan = list_first_entry_or_null(&port->vlans,
+ struct macvlan_dev,
+ list);
+
+ if (macvlan_sync_address(vlan->dev, dev->dev_addr))
+ return NOTIFY_BAD;
+
+ break;
case NETDEV_UNREGISTER:
/* twiddle thumbs on netns device moves */
if (dev->reg_state != NETREG_UNREGISTERING)
@@ -1036,11 +1204,17 @@ static int macvlan_device_event(struct notifier_block *unused,
list_for_each_entry_safe(vlan, next, &port->vlans, list)
vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
unregister_netdevice_many(&list_kill);
- list_del(&list_kill);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to all vlans */
+ list_for_each_entry(vlan, &port->vlans, list)
+ call_netdevice_notifiers(event, vlan->dev);
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 63aa9d9e34c5..5a7e6397440a 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -298,7 +298,6 @@ static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = SUPPORTED_Backplane;
cmd->advertising = ADVERTISED_Backplane;
- cmd->speed = SPEED_UNKNOWN;
ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_OTHER;
@@ -348,7 +347,7 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
ndev->netdev_ops = &ntb_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+ ndev->ethtool_ops = &ntb_ethtool_ops;
dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
if (!dev->qp) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6a17f92153b3..65de0cab8d07 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,6 +24,12 @@ config AMD_PHY
---help---
Currently supports the am79c874
+config AMD_XGBE_PHY
+ tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
+ depends on OF
+ ---help---
+ Currently supports the AMD 10GbE PHY
+
config MARVELL_PHY
tristate "Drivers for Marvell PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 07d24024863e..7dc3d5b304cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
new file mode 100644
index 000000000000..b57c22442867
--- /dev/null
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -0,0 +1,1357 @@
+/*
+ * AMD 10Gb Ethernet PHY driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>
+
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0.0-a");
+MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
+
+#define XGBE_PHY_ID 0x000162d0
+#define XGBE_PHY_MASK 0xfffffff0
+
+#define XGBE_AN_INT_CMPLT 0x01
+#define XGBE_AN_INC_LINK 0x02
+#define XGBE_AN_PG_RCV 0x04
+
+#define XNP_MCF_NULL_MESSAGE 0x001
+#define XNP_ACK_PROCESSED (1 << 12)
+#define XNP_MP_FORMATTED (1 << 13)
+#define XNP_NP_EXCHANGE (1 << 15)
+
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+#ifndef MDIO_PMA_10GBR_FEC_CTRL
+#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
+#endif
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* SerDes integration register offsets */
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PI_SPD_SEL_INDEX 12
+#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+#define SPEED_10000_CDR 0x7
+#define SPEED_10000_PLL 0x1
+#define SPEED_10000_RATE 0x0
+#define SPEED_10000_TXAMP 0xa
+#define SPEED_10000_WORD 0x7
+
+#define SPEED_2500_CDR 0x2
+#define SPEED_2500_PLL 0x0
+#define SPEED_2500_RATE 0x2
+#define SPEED_2500_TXAMP 0xf
+#define SPEED_2500_WORD 0x1
+
+#define SPEED_1000_CDR 0x2
+#define SPEED_1000_PLL 0x0
+#define SPEED_1000_RATE 0x3
+#define SPEED_1000_TXAMP 0xf
+#define SPEED_1000_WORD 0x1
+
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG20 0x0050
+#define RXTX_REG114 0x01c8
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+
+#define RXTX_10000_BLWC 0
+#define RXTX_10000_PQ 0x1e
+
+#define RXTX_2500_BLWC 1
+#define RXTX_2500_PQ 0xa
+
+#define RXTX_1000_BLWC 1
+#define RXTX_1000_PQ 0xa
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
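+
+/* Illustrative sketch only: with _var = 0x1234, GET_BITS(0x1234, 4, 8)
+ * shifts right by the index (0x123) and masks with ((1 << 8) - 1), giving
+ * 0x23; SET_BITS(var, 4, 8, 0x56) first clears bits 4-11 of var and then
+ * ORs in 0x56 << 4.
+ */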
+
+/* Macros for reading or writing SerDes integration registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XSIR0_IOREAD(_priv, _reg) \
+ ioread16((_priv)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_priv, _reg) \
+ ioread16((_priv)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
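+
+/* For illustration: a call such as
+ *
+ *   XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
+ *
+ * pastes the register and field names into SIR1_SPEED_RATECHANGE_INDEX and
+ * SIR1_SPEED_RATECHANGE_WIDTH, i.e. it performs a read-modify-write of
+ * bit 6 of the 16-bit register at priv->sir1_regs + SIR1_SPEED.
+ */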
+
+
+/* Macros for reading or writing SerDes RxTx registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XRXTX_IOREAD(_priv, _reg) \
+ ioread16((_priv)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_priv), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_priv, _reg, _val) \
+ iowrite16((_val), (_priv)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_priv), _reg, reg_val); \
+} while (0)
+
+
+enum amd_xgbe_phy_an {
+ AMD_XGBE_AN_READY = 0,
+ AMD_XGBE_AN_START,
+ AMD_XGBE_AN_EVENT,
+ AMD_XGBE_AN_PAGE_RECEIVED,
+ AMD_XGBE_AN_INCOMPAT_LINK,
+ AMD_XGBE_AN_COMPLETE,
+ AMD_XGBE_AN_NO_LINK,
+ AMD_XGBE_AN_EXIT,
+ AMD_XGBE_AN_ERROR,
+};
+
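+/* Rough flow of the auto-negotiation state machine driven by the worker
+ * below: READY -> START -> EVENT -> PAGE_RECEIVED (looping through EVENT
+ * while pages and KR training are exchanged) -> COMPLETE or NO_LINK.
+ * INCOMPAT_LINK switches between KR and KX mode and restarts at START;
+ * ERROR and EXIT terminate the worker.
+ */
+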
+enum amd_xgbe_phy_rx {
+ AMD_XGBE_RX_READY = 0,
+ AMD_XGBE_RX_BPA,
+ AMD_XGBE_RX_XNP,
+ AMD_XGBE_RX_COMPLETE,
+};
+
+enum amd_xgbe_phy_mode {
+ AMD_XGBE_MODE_KR,
+ AMD_XGBE_MODE_KX,
+};
+
+struct amd_xgbe_phy_priv {
+ struct platform_device *pdev;
+ struct device *dev;
+
+ struct phy_device *phydev;
+
+ /* SerDes related mmio resources */
+ struct resource *rxtx_res;
+ struct resource *sir0_res;
+ struct resource *sir1_res;
+
+ /* SerDes related mmio registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+
+ /* Maintain link status for re-starting auto-negotiation */
+ unsigned int link;
+ enum amd_xgbe_phy_mode mode;
+
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum amd_xgbe_phy_an an_result;
+ enum amd_xgbe_phy_an an_state;
+ enum amd_xgbe_phy_rx kr_state;
+ enum amd_xgbe_phy_rx kx_state;
+ struct work_struct an_work;
+ struct workqueue_struct *an_workqueue;
+};
+
+static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret |= 0x02;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~0x02;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ usleep_range(75, 100);
+
+ ret &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ return 0;
+}
+
+static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) &&
+ !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY))
+ usleep_range(10, 20);
+}
+
+static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Enable KR training */
+ ret = amd_xgbe_an_enable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KR/10G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBR;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED10G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 10G speed */
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ priv->mode = AMD_XGBE_MODE_KR;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KX/1G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED1G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 2.5G speed */
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ priv->mode = AMD_XGBE_MODE_KX;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Disable KR training */
+ ret = amd_xgbe_an_disable_kr_training(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set PCS to KX/1G speed */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_PCS_CTRL2_TYPE;
+ ret |= MDIO_PCS_CTRL2_10GBX;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_CTRL1_SPEEDSEL;
+ ret |= MDIO_CTRL1_SPEED1G;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = amd_xgbe_phy_pcs_power_cycle(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Set SerDes to 1G speed */
+ amd_xgbe_phy_serdes_start_ratechange(phydev);
+
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
+ XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
+
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
+
+ amd_xgbe_phy_serdes_complete_ratechange(phydev);
+
+ priv->mode = AMD_XGBE_MODE_KX;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* If we are in KR switch to KX, and vice-versa */
+ if (priv->mode == AMD_XGBE_MODE_KR)
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ else
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+
+ return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return AMD_XGBE_AN_START;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ad_reg, lp_reg, ret;
+
+ *state = AMD_XGBE_RX_COMPLETE;
+
+ /* If we're in KX mode then we're done */
+ if (priv->mode == AMD_XGBE_MODE_KX)
+ return AMD_XGBE_AN_EVENT;
+
+ /* Enable/Disable FEC */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ ret |= 0x01;
+ else
+ ret &= ~0x01;
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
+
+ /* Start KR training */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret |= 0x01;
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
+
+ return AMD_XGBE_AN_EVENT;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ u16 msg;
+
+ *state = AMD_XGBE_RX_XNP;
+
+ msg = XNP_MCF_NULL_MESSAGE;
+ msg |= XNP_MP_FORMATTED;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AMD_XGBE_AN_EVENT;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ unsigned int link_support;
+ int ret, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
+ if (!(ret & link_support))
+ return amd_xgbe_an_switch_mode(phydev);
+
+ /* Check Extended Next Page support */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
+ amd_xgbe_an_tx_xnp(phydev, state) :
+ amd_xgbe_an_tx_training(phydev, state);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
+ enum amd_xgbe_phy_rx *state)
+{
+ int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
+ amd_xgbe_an_tx_xnp(phydev, state) :
+ amd_xgbe_an_tx_training(phydev, state);
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ int ret;
+
+	/* Make sure we aren't looping endlessly while trying to negotiate */
+ if (priv->mode == AMD_XGBE_MODE_KR) {
+ if (priv->kr_state != AMD_XGBE_RX_READY)
+ return AMD_XGBE_AN_NO_LINK;
+ priv->kr_state = AMD_XGBE_RX_BPA;
+ } else {
+ if (priv->kx_state != AMD_XGBE_RX_READY)
+ return AMD_XGBE_AN_NO_LINK;
+ priv->kx_state = AMD_XGBE_RX_BPA;
+ }
+
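+	/* The masks below follow the clause 73 ability bit layout as used
+	 * here: 0xc000 in register 3 are the FEC ability/request bits,
+	 * 0x80 and 0x20 in register 2 are the 10GBase-KR and 1000Base-KX
+	 * technology bits, and 0x400/0x800 in register 1 are symmetric
+	 * and asymmetric pause.
+	 */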
+ /* Set up Advertisement register 3 first */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (phydev->supported & SUPPORTED_10000baseR_FEC)
+ ret |= 0xc000;
+ else
+ ret &= ~0xc000;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
+
+ /* Set up Advertisement register 2 next */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (phydev->supported & SUPPORTED_10000baseKR_Full)
+ ret |= 0x80;
+ else
+ ret &= ~0x80;
+
+ if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ ret |= 0x20;
+ else
+ ret &= ~0x20;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
+
+ /* Set up Advertisement register 1 last */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ if (phydev->supported & SUPPORTED_Pause)
+ ret |= 0x400;
+ else
+ ret &= ~0x400;
+
+ if (phydev->supported & SUPPORTED_Asym_Pause)
+ ret |= 0x800;
+ else
+ ret &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ ret &= ~XNP_NP_EXCHANGE;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
+
+ /* Enable and start auto-negotiation */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ ret |= MDIO_AN_CTRL1_ENABLE;
+ ret |= MDIO_AN_CTRL1_RESTART;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+ return AMD_XGBE_AN_EVENT;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
+{
+ enum amd_xgbe_phy_an new_state;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
+ if (ret < 0)
+ return AMD_XGBE_AN_ERROR;
+
+ new_state = AMD_XGBE_AN_EVENT;
+ if (ret & XGBE_AN_PG_RCV)
+ new_state = AMD_XGBE_AN_PAGE_RECEIVED;
+ else if (ret & XGBE_AN_INC_LINK)
+ new_state = AMD_XGBE_AN_INCOMPAT_LINK;
+ else if (ret & XGBE_AN_INT_CMPLT)
+ new_state = AMD_XGBE_AN_COMPLETE;
+
+ if (new_state != AMD_XGBE_AN_EVENT)
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return new_state;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_rx *state;
+ int ret;
+
+ state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
+ : &priv->kx_state;
+
+ switch (*state) {
+ case AMD_XGBE_RX_BPA:
+ ret = amd_xgbe_an_rx_bpa(phydev, state);
+ break;
+
+ case AMD_XGBE_RX_XNP:
+ ret = amd_xgbe_an_rx_xnp(phydev, state);
+ break;
+
+ default:
+ ret = AMD_XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
+{
+ return amd_xgbe_an_switch_mode(phydev);
+}
+
+static void amd_xgbe_an_state_machine(struct work_struct *work)
+{
+ struct amd_xgbe_phy_priv *priv = container_of(work,
+ struct amd_xgbe_phy_priv,
+ an_work);
+ struct phy_device *phydev = priv->phydev;
+ enum amd_xgbe_phy_an cur_state;
+ int sleep;
+
+ while (1) {
+ mutex_lock(&priv->an_mutex);
+
+ cur_state = priv->an_state;
+
+ switch (priv->an_state) {
+ case AMD_XGBE_AN_START:
+ priv->an_state = amd_xgbe_an_start(phydev);
+ break;
+
+ case AMD_XGBE_AN_EVENT:
+ priv->an_state = amd_xgbe_an_event(phydev);
+ break;
+
+ case AMD_XGBE_AN_PAGE_RECEIVED:
+ priv->an_state = amd_xgbe_an_page_received(phydev);
+ break;
+
+ case AMD_XGBE_AN_INCOMPAT_LINK:
+ priv->an_state = amd_xgbe_an_incompat_link(phydev);
+ break;
+
+ case AMD_XGBE_AN_COMPLETE:
+ case AMD_XGBE_AN_NO_LINK:
+ case AMD_XGBE_AN_EXIT:
+ goto exit_unlock;
+
+ default:
+ priv->an_state = AMD_XGBE_AN_ERROR;
+ }
+
+ if (priv->an_state == AMD_XGBE_AN_ERROR) {
+ netdev_err(phydev->attached_dev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+ goto exit_unlock;
+ }
+
+ sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
+
+ mutex_unlock(&priv->an_mutex);
+
+ if (sleep)
+ usleep_range(20, 50);
+ }
+
+exit_unlock:
+ priv->an_result = priv->an_state;
+ priv->an_state = AMD_XGBE_AN_READY;
+
+ mutex_unlock(&priv->an_mutex);
+}
+
+static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
+{
+ int count, ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_CTRL1_RESET;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
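+	/* Poll the self-clearing reset bit; 50 iterations of 20 ms give the
+	 * PHY roughly one second to come out of reset before -ETIMEDOUT.
+	 */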
+ count = 50;
+ do {
+ msleep(20);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while ((ret & MDIO_CTRL1_RESET) && --count);
+
+ if (ret & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_config_init(struct phy_device *phydev)
+{
+ /* Initialize supported features */
+ phydev->supported = SUPPORTED_Autoneg;
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->supported |= SUPPORTED_Backplane;
+ phydev->supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_2500baseX_Full;
+ phydev->supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseR_FEC;
+ phydev->advertising = phydev->supported;
+
+ /* Turn off and clear interrupts */
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable auto-negotiation */
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MDIO_AN_CTRL1_ENABLE;
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
+
+ /* Validate/Set specified speed */
+ switch (phydev->speed) {
+ case SPEED_10000:
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ break;
+
+ case SPEED_2500:
+ ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+ break;
+
+ case SPEED_1000:
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* Validate duplex mode */
+ if (phydev->duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+}
+
+static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return amd_xgbe_phy_setup_forced(phydev);
+
+ /* Make sure we have the AN MMD present */
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+ /* Get the current speed mode */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ return ret;
+
+ /* Start/Restart the auto-negotiation state machine */
+ mutex_lock(&priv->an_mutex);
+ priv->an_result = AMD_XGBE_AN_READY;
+ priv->an_state = AMD_XGBE_AN_START;
+ priv->kr_state = AMD_XGBE_RX_READY;
+ priv->kx_state = AMD_XGBE_RX_READY;
+ mutex_unlock(&priv->an_mutex);
+
+ queue_work(priv->an_workqueue, &priv->an_work);
+
+ return 0;
+}
+
+static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_an state;
+
+ mutex_lock(&priv->an_mutex);
+ state = priv->an_result;
+ mutex_unlock(&priv->an_mutex);
+
+ return (state == AMD_XGBE_AN_COMPLETE);
+}
+
+static int amd_xgbe_phy_update_link(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ enum amd_xgbe_phy_an state;
+ unsigned int check_again, autoneg;
+ int ret;
+
+ /* If we're doing auto-negotiation don't report link down */
+ mutex_lock(&priv->an_mutex);
+ state = priv->an_state;
+ mutex_unlock(&priv->an_mutex);
+
+ if (state != AMD_XGBE_AN_READY) {
+ phydev->link = 1;
+ return 0;
+ }
+
+ /* Since the device can be in the wrong mode when a link is
+ * (re-)established (cable connected after the interface is
+ * up, etc.), the link status may report no link. If there
+ * is no link, try switching modes and checking the status
+ * again.
+ */
+ check_again = 1;
+again:
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (!phydev->link) {
+ ret = amd_xgbe_phy_switch_mode(phydev);
+ if (check_again) {
+ check_again = 0;
+ goto again;
+ }
+ }
+
+ autoneg = (phydev->link && !priv->link) ? 1 : 0;
+ priv->link = phydev->link;
+ if (autoneg) {
+ /* Link is (back) up, re-start auto-negotiation */
+ ret = amd_xgbe_phy_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amd_xgbe_phy_read_status(struct phy_device *phydev)
+{
+ u32 mmd_mask = phydev->c45_ids.devices_in_package;
+ int ret, mode, ad_ret, lp_ret;
+
+ ret = amd_xgbe_phy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (mode < 0)
+ return mode;
+ mode &= MDIO_PCS_CTRL2_TYPE;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ if (!(mmd_mask & MDIO_DEVS_AN))
+ return -EINVAL;
+
+ if (!amd_xgbe_phy_aneg_done(phydev))
+ return 0;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (ad_ret < 0)
+ return ad_ret;
+ lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_ret < 0)
+ return lp_ret;
+
+ ad_ret &= lp_ret;
+ phydev->pause = (ad_ret & 0x400) ? 1 : 0;
+ phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_ADVERTISE + 1);
+ if (ad_ret < 0)
+ return ad_ret;
+ lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_ret < 0)
+ return lp_ret;
+
+ ad_ret &= lp_ret;
+ if (ad_ret & 0x80) {
+ phydev->speed = SPEED_10000;
+ if (mode != MDIO_PCS_CTRL2_10GBR) {
+ ret = amd_xgbe_phy_xgmii_mode(phydev);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ phydev->speed = SPEED_1000;
+ if (mode == MDIO_PCS_CTRL2_10GBR) {
+ ret = amd_xgbe_phy_gmii_mode(phydev);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+ } else {
+ phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000
+ : SPEED_1000;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ }
+
+ return 0;
+}
+
+static int amd_xgbe_phy_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ goto unlock;
+
+ ret |= MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
+ if (ret < 0)
+ goto unlock;
+
+ ret &= ~MDIO_CTRL1_LPOWER;
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int amd_xgbe_phy_probe(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv;
+ struct platform_device *pdev;
+ struct device *dev;
+ char *wq_name;
+ int ret;
+
+ if (!phydev->dev.of_node)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(phydev->dev.of_node);
+ if (!pdev)
+ return -EINVAL;
+ dev = &pdev->dev;
+
+ wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
+ if (!wq_name) {
+ ret = -ENOMEM;
+ goto err_pdev;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_name;
+ }
+
+ priv->pdev = pdev;
+ priv->dev = dev;
+ priv->phydev = phydev;
+
+ /* Get the device mmio areas */
+ priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
+ if (IS_ERR(priv->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(priv->rxtx_regs);
+ goto err_priv;
+ }
+
+ priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
+ if (IS_ERR(priv->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(priv->sir0_regs);
+ goto err_rxtx;
+ }
+
+ priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
+ if (IS_ERR(priv->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(priv->sir1_regs);
+ goto err_sir0;
+ }
+
+ priv->link = 1;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
+ if (ret < 0)
+ goto err_sir1;
+ if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+ priv->mode = AMD_XGBE_MODE_KR;
+ else
+ priv->mode = AMD_XGBE_MODE_KX;
+
+ mutex_init(&priv->an_mutex);
+ INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
+ priv->an_workqueue = create_singlethread_workqueue(wq_name);
+ if (!priv->an_workqueue) {
+ ret = -ENOMEM;
+ goto err_sir1;
+ }
+
+ phydev->priv = priv;
+
+ kfree(wq_name);
+ of_dev_put(pdev);
+
+ return 0;
+
+err_sir1:
+ devm_iounmap(dev, priv->sir1_regs);
+ devm_release_mem_region(dev, priv->sir1_res->start,
+ resource_size(priv->sir1_res));
+
+err_sir0:
+ devm_iounmap(dev, priv->sir0_regs);
+ devm_release_mem_region(dev, priv->sir0_res->start,
+ resource_size(priv->sir0_res));
+
+err_rxtx:
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+err_priv:
+ devm_kfree(dev, priv);
+
+err_name:
+ kfree(wq_name);
+
+err_pdev:
+ of_dev_put(pdev);
+
+ return ret;
+}
+
+static void amd_xgbe_phy_remove(struct phy_device *phydev)
+{
+ struct amd_xgbe_phy_priv *priv = phydev->priv;
+ struct device *dev = priv->dev;
+
+	/* Stop any in-progress auto-negotiation */
+ mutex_lock(&priv->an_mutex);
+ priv->an_state = AMD_XGBE_AN_EXIT;
+ mutex_unlock(&priv->an_mutex);
+
+ flush_workqueue(priv->an_workqueue);
+ destroy_workqueue(priv->an_workqueue);
+
+ /* Release resources */
+ devm_iounmap(dev, priv->sir1_regs);
+ devm_release_mem_region(dev, priv->sir1_res->start,
+ resource_size(priv->sir1_res));
+
+ devm_iounmap(dev, priv->sir0_regs);
+ devm_release_mem_region(dev, priv->sir0_res->start,
+ resource_size(priv->sir0_res));
+
+ devm_iounmap(dev, priv->rxtx_regs);
+ devm_release_mem_region(dev, priv->rxtx_res->start,
+ resource_size(priv->rxtx_res));
+
+ devm_kfree(dev, priv);
+}
+
+static int amd_xgbe_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
+}
+
+static struct phy_driver amd_xgbe_phy_driver[] = {
+ {
+ .phy_id = XGBE_PHY_ID,
+ .phy_id_mask = XGBE_PHY_MASK,
+ .name = "AMD XGBE PHY",
+ .features = 0,
+ .probe = amd_xgbe_phy_probe,
+ .remove = amd_xgbe_phy_remove,
+ .soft_reset = amd_xgbe_phy_soft_reset,
+ .config_init = amd_xgbe_phy_config_init,
+ .suspend = amd_xgbe_phy_suspend,
+ .resume = amd_xgbe_phy_resume,
+ .config_aneg = amd_xgbe_phy_config_aneg,
+ .aneg_done = amd_xgbe_phy_aneg_done,
+ .read_status = amd_xgbe_phy_read_status,
+ .match_phy_device = amd_xgbe_match_phy_device,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+static int __init amd_xgbe_phy_init(void)
+{
+ return phy_drivers_register(amd_xgbe_phy_driver,
+ ARRAY_SIZE(amd_xgbe_phy_driver));
+}
+
+static void __exit amd_xgbe_phy_exit(void)
+{
+ phy_drivers_unregister(amd_xgbe_phy_driver,
+ ARRAY_SIZE(amd_xgbe_phy_driver));
+}
+
+module_init(amd_xgbe_phy_init);
+module_exit(amd_xgbe_phy_exit);
+
+static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
+ { XGBE_PHY_ID, XGBE_PHY_MASK },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 643464d5a727..6c622aedbae1 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -144,41 +144,11 @@ static int at803x_resume(struct phy_device *phydev)
static int at803x_config_init(struct phy_device *phydev)
{
- int val;
int ret;
- u32 features;
-
- features = SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_AUI |
- SUPPORTED_FIBRE | SUPPORTED_BNC;
-
- val = phy_read(phydev, MII_BMSR);
- if (val < 0)
- return val;
-
- if (val & BMSR_ANEGCAPABLE)
- features |= SUPPORTED_Autoneg;
- if (val & BMSR_100FULL)
- features |= SUPPORTED_100baseT_Full;
- if (val & BMSR_100HALF)
- features |= SUPPORTED_100baseT_Half;
- if (val & BMSR_10FULL)
- features |= SUPPORTED_10baseT_Full;
- if (val & BMSR_10HALF)
- features |= SUPPORTED_10baseT_Half;
-
- if (val & BMSR_ESTATEN) {
- val = phy_read(phydev, MII_ESTATUS);
- if (val < 0)
- return val;
-
- if (val & ESTATUS_1000_TFULL)
- features |= SUPPORTED_1000baseT_Full;
- if (val & ESTATUS_1000_THALF)
- features |= SUPPORTED_1000baseT_Half;
- }
- phydev->supported = features;
- phydev->advertising = features;
+ ret = genphy_config_init(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
ret = phy_write(phydev, AT803X_DEBUG_ADDR,
@@ -283,8 +253,7 @@ static int __init atheros_init(void)
static void __exit atheros_exit(void)
{
- return phy_drivers_unregister(at803x_driver,
- ARRAY_SIZE(at803x_driver));
+ phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
}
module_init(atheros_init);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ba55adfc7aae..d60d875cb445 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -21,6 +21,7 @@
#include <linux/phy_fixed.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/of.h>
#define MII_REGS_NUM 29
@@ -31,7 +32,7 @@ struct fixed_mdio_bus {
};
struct fixed_phy {
- int id;
+ int addr;
u16 regs[MII_REGS_NUM];
struct phy_device *phydev;
struct fixed_phy_status status;
@@ -104,8 +105,8 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
if (fp->status.asym_pause)
lpa |= LPA_PAUSE_ASYM;
- fp->regs[MII_PHYSID1] = fp->id >> 16;
- fp->regs[MII_PHYSID2] = fp->id;
+ fp->regs[MII_PHYSID1] = 0;
+ fp->regs[MII_PHYSID2] = 0;
fp->regs[MII_BMSR] = bmsr;
fp->regs[MII_BMCR] = bmcr;
@@ -115,7 +116,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
return 0;
}
-static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
+static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
{
struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
@@ -124,7 +125,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
return -1;
list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->id == phy_id) {
+ if (fp->addr == phy_addr) {
/* Issue callback if user registered it. */
if (fp->link_update) {
fp->link_update(fp->phydev->attached_dev,
@@ -138,7 +139,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
return 0xFFFF;
}
-static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num,
+static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
u16 val)
{
return 0;
@@ -160,7 +161,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
return -EINVAL;
list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->id == phydev->phy_id) {
+ if (fp->addr == phydev->addr) {
fp->link_update = link_update;
fp->phydev = phydev;
return 0;
@@ -171,7 +172,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-int fixed_phy_add(unsigned int irq, int phy_id,
+int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_phy_status *status)
{
int ret;
@@ -184,9 +185,9 @@ int fixed_phy_add(unsigned int irq, int phy_id,
memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
- fmb->irqs[phy_id] = irq;
+ fmb->irqs[phy_addr] = irq;
- fp->id = phy_id;
+ fp->addr = phy_addr;
fp->status = *status;
ret = fixed_phy_update_regs(fp);
@@ -203,6 +204,66 @@ err_regs:
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
+void fixed_phy_del(int phy_addr)
+{
+ struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct fixed_phy *fp, *tmp;
+
+ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+ if (fp->addr == phy_addr) {
+ list_del(&fp->node);
+ kfree(fp);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(fixed_phy_del);
+
+static int phy_fixed_addr;
+static DEFINE_SPINLOCK(phy_fixed_addr_lock);
+
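+/* Usage sketch (informal, not kernel-doc): a caller such as a DT-based MAC
+ * driver passes a fixed_phy_status and the fixed-link device_node and has
+ * a phy_device registered on the fixed MDIO bus at the next free address;
+ * on failure the fixed-PHY entry is removed again via fixed_phy_del().
+ */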
+int fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np)
+{
+ struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct phy_device *phy;
+ int phy_addr;
+ int ret;
+
+ /* Get the next available PHY address, up to PHY_MAX_ADDR */
+ spin_lock(&phy_fixed_addr_lock);
+ if (phy_fixed_addr == PHY_MAX_ADDR) {
+ spin_unlock(&phy_fixed_addr_lock);
+ return -ENOSPC;
+ }
+ phy_addr = phy_fixed_addr++;
+ spin_unlock(&phy_fixed_addr_lock);
+
+ ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+ if (ret < 0)
+ return ret;
+
+ phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+ if (!phy || IS_ERR(phy)) {
+ fixed_phy_del(phy_addr);
+ return -EINVAL;
+ }
+
+ of_node_get(np);
+ phy->dev.of_node = np;
+
+ ret = phy_device_register(phy);
+ if (ret) {
+ phy_device_free(phy);
+ of_node_put(np);
+ fixed_phy_del(phy_addr);
+ return ret;
+ }
+
+ return 0;
+}
+
static int __init fixed_mdio_bus_init(void)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 76f54b32a120..2e58aa54484c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
}
EXPORT_SYMBOL(mdiobus_alloc_size);
+static void _devm_mdiobus_free(struct device *dev, void *res)
+{
+ mdiobus_free(*(struct mii_bus **)res);
+}
+
+static int devm_mdiobus_match(struct device *dev, void *res, void *data)
+{
+ struct mii_bus **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+/**
+ * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
+ * @dev: Device to allocate mii_bus for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed mdiobus_alloc_size. mii_bus allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an mii_bus allocated with this function needs to be freed separately,
+ * devm_mdiobus_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated mii_bus on success, NULL on failure.
+ */
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
+{
+ struct mii_bus **ptr, *bus;
+
+ ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+	/* allocate the mii_bus and tie it to the devres entry so it is
+	 * freed automatically on driver detach
+	 */
+ bus = mdiobus_alloc_size(sizeof_priv);
+ if (bus) {
+ *ptr = bus;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return bus;
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
+
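+/* Hypothetical probe-time usage sketch (struct foo_priv is invented here
+ * purely for illustration):
+ *
+ *	bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct foo_priv));
+ *	if (!bus)
+ *		return -ENOMEM;
+ *	... fill in bus->name, bus->read, bus->write, then mdiobus_register() ...
+ */
+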
+/**
+ * devm_mdiobus_free - Resource-managed mdiobus_free()
+ * @dev: Device this mii_bus belongs to
+ * @bus: the mii_bus associated with the device
+ *
+ * Free mii_bus allocated with devm_mdiobus_alloc_size().
+ */
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
+{
+ int rc;
+
+ rc = devres_release(dev, _devm_mdiobus_free,
+ devm_mdiobus_match, bus);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_free);
+
/**
* mdiobus_release - mii_bus device release callback
* @d: the target struct device that contains the mii_bus
@@ -233,6 +300,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
if (IS_ERR(phydev) || phydev == NULL)
return phydev;
+	/*
+	 * For DT, see if the auto-probed PHY has a corresponding child
+	 * in the bus node, and set the of_node pointer in this case.
+	 */
+ of_mdiobus_link_phydev(bus, phydev);
+
err = phy_device_register(phydev);
if (err) {
phy_device_free(phydev);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index d849684231c1..bc7c7d2f75f2 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
return 0;
}
+#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d
+#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e
+#define OP_DATA 1
+#define KSZ9031_PS_TO_REG 60
+
+/* Extended registers */
+#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_CLK_PAD_SKEW 8
+
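+/* The extended registers are reached through the indirect MMD access pair
+ * implemented below: write the device address to reg 0x0d, the register
+ * number to reg 0x0e, switch reg 0x0d to data mode (mode << 14, with
+ * OP_DATA), and then transfer the value through reg 0x0e.
+ */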
+static int ksz9031_extended_write(struct phy_device *phydev,
+ u8 mode, u32 dev_addr, u32 regnum, u16 val)
+{
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+ phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+ return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
+}
+
+static int ksz9031_extended_read(struct phy_device *phydev,
+ u8 mode, u32 dev_addr, u32 regnum)
+{
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+ phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+ phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+ return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
+}
+
+static int ksz9031_of_load_skew_values(struct phy_device *phydev,
+ struct device_node *of_node,
+ u16 reg, size_t field_sz,
+ char *field[], u8 numfields)
+{
+ int val[4] = {-1, -2, -3, -4};
+ int matches = 0;
+ u16 mask;
+ u16 maxval;
+ u16 newval;
+ int i;
+
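+	/* val[] starts out with per-slot sentinels (-1 .. -4) so that, after
+	 * the DT lookup, val[i] != -(i + 1) identifies exactly those fields
+	 * whose properties were present and must be merged into the register.
+	 */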
+ for (i = 0; i < numfields; i++)
+ if (!of_property_read_u32(of_node, field[i], val + i))
+ matches++;
+
+ if (!matches)
+ return 0;
+
+ if (matches < numfields)
+ newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ else
+ newval = 0;
+
+ maxval = (field_sz == 4) ? 0xf : 0x1f;
+ for (i = 0; i < numfields; i++)
+ if (val[i] != -(i + 1)) {
+ mask = 0xffff;
+ mask ^= maxval << (field_sz * i);
+ newval = (newval & mask) |
+ (((val[i] / KSZ9031_PS_TO_REG) & maxval)
+ << (field_sz * i));
+ }
+
+ return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
+
+static int ksz9031_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+ char *rx_data_skews[4] = {
+ "rxd0-skew-ps", "rxd1-skew-ps",
+ "rxd2-skew-ps", "rxd3-skew-ps"
+ };
+ char *tx_data_skews[4] = {
+ "txd0-skew-ps", "txd1-skew-ps",
+ "txd2-skew-ps", "txd3-skew-ps"
+ };
+ char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_node) {
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+ clk_skews, 2);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+ control_skews, 2);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+ rx_data_skews, 4);
+
+ ksz9031_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+ tx_data_skews, 4);
+ }
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz9031_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4987a1c6dc52..35d753d22f78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -33,6 +33,7 @@
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/irq.h>
@@ -1067,14 +1068,11 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
-static int genphy_config_init(struct phy_device *phydev)
+int genphy_config_init(struct phy_device *phydev)
{
int val;
u32 features;
- /* For now, I'll claim that the generic driver supports
- * all possible port types
- */
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_BNC);
@@ -1107,8 +1105,8 @@ static int genphy_config_init(struct phy_device *phydev)
features |= SUPPORTED_1000baseT_Half;
}
- phydev->supported = features;
- phydev->advertising = features;
+ phydev->supported &= features;
+ phydev->advertising &= features;
return 0;
}
@@ -1118,6 +1116,7 @@ static int gen10g_soft_reset(struct phy_device *phydev)
/* Do nothing for now */
return 0;
}
+EXPORT_SYMBOL(genphy_config_init);
static int gen10g_config_init(struct phy_device *phydev)
{
@@ -1168,6 +1167,38 @@ static int gen10g_resume(struct phy_device *phydev)
return 0;
}
+static void of_set_phy_supported(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->dev.of_node;
+ u32 max_speed;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return;
+
+ if (!node)
+ return;
+
+ if (!of_property_read_u32(node, "max-speed", &max_speed)) {
+		/* The default values for phydev->supported are provided by
+		 * the PHY driver's "features" member; reset to sane defaults
+		 * first before enabling the higher speeds.
+		 */
+ phydev->supported &= PHY_DEFAULT_FEATURES;
+
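+		/* Note: the cases below deliberately fall through, so e.g. a
+		 * max-speed of 1000 also enables the 100 and 10 Mb/s feature
+		 * bits.
+		 */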
+ switch (max_speed) {
+ default:
+ return;
+
+ case SPEED_1000:
+ phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_100:
+ phydev->supported |= PHY_100BT_FEATURES;
+ case SPEED_10:
+ phydev->supported |= PHY_10BT_FEATURES;
+ }
+ }
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -1202,7 +1233,8 @@ static int phy_probe(struct device *dev)
* or both of these values
*/
phydev->supported = phydrv->features;
- phydev->advertising = phydrv->features;
+ of_set_phy_supported(phydev);
+ phydev->advertising = phydev->supported;
/* Set the state to READY by default */
phydev->state = PHY_READY;
@@ -1295,7 +1327,9 @@ static struct phy_driver genphy_driver[] = {
.name = "Generic PHY",
.soft_reset = genphy_soft_reset,
.config_init = genphy_config_init,
- .features = 0,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_MII |
+ SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_BNC,
.config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
.read_status = genphy_read_status,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index fa1d69a38ccf..45483fdfbe06 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -64,65 +64,51 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
return err;
}
-/* RTL8201CP */
-static struct phy_driver rtl8201cp_driver = {
- .phy_id = 0x00008201,
- .name = "RTL8201CP Ethernet",
- .phy_id_mask = 0x0000ffff,
- .features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* RTL8211B */
-static struct phy_driver rtl8211b_driver = {
- .phy_id = 0x001cc912,
- .name = "RTL8211B Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &rtl821x_ack_interrupt,
- .config_intr = &rtl8211b_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* RTL8211E */
-static struct phy_driver rtl8211e_driver = {
- .phy_id = 0x001cc915,
- .name = "RTL8211E Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &rtl821x_ack_interrupt,
- .config_intr = &rtl8211e_config_intr,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .driver = { .owner = THIS_MODULE,},
+static struct phy_driver realtek_drvs[] = {
+ {
+ .phy_id = 0x00008201,
+ .name = "RTL8201CP Ethernet",
+ .phy_id_mask = 0x0000ffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+ }, {
+ .phy_id = 0x001cc912,
+ .name = "RTL8211B Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl8211b_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+ }, {
+ .phy_id = 0x001cc915,
+ .name = "RTL8211E Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl8211e_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+ },
};
static int __init realtek_init(void)
{
- int ret;
-
- ret = phy_driver_register(&rtl8201cp_driver);
- if (ret < 0)
- return -ENODEV;
- ret = phy_driver_register(&rtl8211b_driver);
- if (ret < 0)
- return -ENODEV;
- return phy_driver_register(&rtl8211e_driver);
+ return phy_drivers_register(realtek_drvs, ARRAY_SIZE(realtek_drvs));
}
static void __exit realtek_exit(void)
{
- phy_driver_unregister(&rtl8211b_driver);
- phy_driver_unregister(&rtl8211e_driver);
+ phy_drivers_unregister(realtek_drvs, ARRAY_SIZE(realtek_drvs));
}
module_init(realtek_init);
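Besides shrinking the boilerplate, the array conversion fixes a latent bug visible above: the old realtek_exit() never unregistered rtl8201cp_driver. A user-space sketch of the register-array-with-unwind pattern that phy_drivers_register() is assumed to implement internally (register_all and the drv struct are hypothetical):

	#include <stdio.h>

	struct drv { const char *name; };

	static int register_one(const struct drv *d)
	{
		printf("registered %s\n", d->name);
		return 0;
	}

	static void unregister_one(const struct drv *d)
	{
		printf("unregistered %s\n", d->name);
	}

	/* register every entry; on failure, unwind the ones already done */
	static int register_all(const struct drv *drvs, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = register_one(&drvs[i]);
			if (ret) {
				while (--i >= 0)
					unregister_one(&drvs[i]);
				return ret;
			}
		}
		return 0;
	}

	int main(void)
	{
		const struct drv drvs[] = {
			{ "RTL8201CP" }, { "RTL8211B" }, { "RTL8211E" },
		};

		return register_all(drvs, 3);
	}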
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 11f34813e23f..180c49479c42 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
static void __exit smsc_exit(void)
{
- return phy_drivers_unregister(smsc_phy_driver,
- ARRAY_SIZE(smsc_phy_driver));
+ phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
}
MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 14372c65a7e8..5dc0935da99c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
static void __exit vsc82xx_exit(void)
{
- return phy_drivers_unregister(vsc82xx_driver,
- ARRAY_SIZE(vsc82xx_driver));
+ phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
}
module_init(vsc82xx_init);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e3923ebb693f..91d6c1272fcf 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,7 +757,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
- struct sock_fprog fprog = {
+ struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
@@ -778,7 +778,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
- struct sock_fprog fprog = {
+ struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 01805319e1e0..1aff970be33e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
nf_reset(skb);
skb->ip_summed = CHECKSUM_NONE;
- ip_select_ident(skb, &rt->dst, NULL);
+ ip_select_ident(skb, NULL);
ip_send_check(iph);
ip_local_out(skb);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index a8497183ff8b..dac7a0d9bb46 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -494,7 +494,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
ndev->mtu = RIO_MAX_MSG_SIZE - 14;
ndev->features = NETIF_F_LLTX;
SET_NETDEV_DEV(ndev, &mport->dev);
- SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+ ndev->ethtool_ops = &rionet_ethtool_ops;
spin_lock_init(&rnet->lock);
spin_lock_init(&rnet->tx_lock);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ce4989be86d9..b4958c7ffa84 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,
static void __team_compute_features(struct team *team)
{
struct team_port *port;
- u32 vlan_features = TEAM_VLAN_FEATURES;
+ u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index dbde3412ee5e..a58dfebb5512 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -49,7 +49,7 @@ struct lb_port_mapping {
struct lb_priv_ex {
struct team *team;
struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
- struct sock_fprog *orig_fprog;
+ struct sock_fprog_kern *orig_fprog;
struct {
unsigned int refresh_interval; /* in tenths of second */
struct delayed_work refresh_dw;
@@ -241,15 +241,15 @@ static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
return 0;
}
-static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
const void *data)
{
- struct sock_fprog *fprog;
+ struct sock_fprog_kern *fprog;
struct sock_filter *filter = (struct sock_filter *) data;
if (data_len % sizeof(struct sock_filter))
return -EINVAL;
- fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+ fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
if (!fprog)
return -ENOMEM;
fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
@@ -262,7 +262,7 @@ static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
return 0;
}
-static void __fprog_destroy(struct sock_fprog *fprog)
+static void __fprog_destroy(struct sock_fprog_kern *fprog)
{
kfree(fprog->filter);
kfree(fprog);
@@ -273,7 +273,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
struct lb_priv *lb_priv = get_lb_priv(team);
struct sk_filter *fp = NULL;
struct sk_filter *orig_fp;
- struct sock_fprog *fprog = NULL;
+ struct sock_fprog_kern *fprog = NULL;
int err;
if (ctx->data.bin_val.len) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ee328ba101e7..98bad1fb1bfb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
- wake_up_all(&tfile->wq.wait);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
- wake_up_all(&tfile->wq.wait);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
- POLLRDNORM | POLLRDBAND);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
- poll_wait(file, &tfile->wq.wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
const struct iovec *iv, ssize_t len, int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
ssize_t ret = 0;
+ int peeked, err, off = 0;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
- if (unlikely(!noblock))
- add_wait_queue(&tfile->wq.wait, &wait);
- while (len) {
- if (unlikely(!noblock))
- current->state = TASK_INTERRUPTIBLE;
+ if (!len)
+ return ret;
- /* Read frames from the queue */
- if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
- if (noblock) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- if (tun->dev->reg_state != NETREG_REGISTERED) {
- ret = -EIO;
- break;
- }
-
- /* Nothing to read, let's sleep */
- schedule();
- continue;
- }
+ if (tun->dev->reg_state != NETREG_REGISTERED)
+ return -EIO;
+ /* Read frames from queue */
+ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+ &peeked, &off, &err);
+ if (skb) {
ret = tun_put_user(tun, tfile, skb, iv, len);
kfree_skb(skb);
- break;
- }
-
- if (unlikely(!noblock)) {
- current->state = TASK_RUNNING;
- remove_wait_queue(&tfile->wq.wait, &wait);
- }
+	} else {
+		ret = err;
+	}
return ret;
}
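The rewrite above drops tun's open-coded wait loop in favor of __skb_recv_datagram(), which already handles sleeping, signal delivery and MSG_DONTWAIT. A hedged kernel-style sketch of the calling pattern, simplified and not the driver's actual code:

	#include <linux/skbuff.h>
	#include <linux/socket.h>
	#include <net/sock.h>

	/* recv_one is a hypothetical helper showing the 3.16-era
	 * __skb_recv_datagram() calling convention used above */
	static struct sk_buff *recv_one(struct sock *sk, int noblock, int *err)
	{
		int peeked, off = 0;

		/* blocks (honoring signals) unless MSG_DONTWAIT is passed */
		return __skb_recv_datagram(sk, noblock ? MSG_DONTWAIT : 0,
					   &peeked, &off, err);
	}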
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->flags = 0;
tfile->ifindex = 0;
- rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
init_waitqueue_head(&tfile->wq.wait);
+ RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 630caf48f63a..8cfc3bb0c6a6 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -793,7 +793,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
netdev->netdev_ops = &catc_netdev_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
catc->usbdev = usbdev;
catc->netdev = netdev;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 2e025ddcef21..5ee7a1dbc023 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -24,13 +24,21 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
+/* alternative VLAN for IP session 0 if not untagged */
+#define MBIM_IPS0_VID 4094
+
/* driver specific data - must match cdc_ncm usage */
struct cdc_mbim_state {
struct cdc_ncm_ctx *ctx;
atomic_t pmcount;
struct usb_driver *subdriver;
- struct usb_interface *control;
- struct usb_interface *data;
+ unsigned long _unused;
+ unsigned long flags;
+};
+
+/* flags for the cdc_mbim_state.flags field */
+enum cdc_mbim_flags {
+ FLAG_IPS0_VLAN = 1 << 0, /* IP session 0 is tagged */
};
/* using a counter to merge subdriver requests with our own into a combined state */
@@ -62,16 +70,91 @@ static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
return cdc_mbim_manage_power(dev, status);
}
+static int cdc_mbim_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+
+ /* creation of this VLAN is a request to tag IP session 0 */
+ if (vid == MBIM_IPS0_VID)
+ info->flags |= FLAG_IPS0_VLAN;
+	else if (vid >= 512)	/* we don't map these to MBIM sessions */
+		return -EINVAL;
+ return 0;
+}
+
+static int cdc_mbim_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct cdc_mbim_state *info = (void *)&dev->data;
+
+ /* this is a request for an untagged IP session 0 */
+ if (vid == MBIM_IPS0_VID)
+ info->flags &= ~FLAG_IPS0_VLAN;
+ return 0;
+}
+
+static const struct net_device_ops cdc_mbim_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cdc_mbim_rx_kill_vid,
+};
+
+/* Change the control interface altsetting and update the .driver_info
+ * pointer if the entry that matches after the class code change
+ * points to a different struct
+ */
+static int cdc_mbim_set_ctrlalt(struct usbnet *dev, struct usb_interface *intf, u8 alt)
+{
+ struct usb_driver *driver = to_usb_driver(intf->dev.driver);
+ const struct usb_device_id *id;
+ struct driver_info *info;
+ int ret;
+
+ ret = usb_set_interface(dev->udev,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ alt);
+ if (ret)
+ return ret;
+
+ id = usb_match_id(intf, driver->id_table);
+ if (!id)
+ return -ENODEV;
+
+ info = (struct driver_info *)id->driver_info;
+ if (info != dev->driver_info) {
+ dev_dbg(&intf->dev, "driver_info updated to '%s'\n",
+ info->description);
+ dev->driver_info = info;
+ }
+ return 0;
+}
static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx;
struct usb_driver *subdriver = ERR_PTR(-ENODEV);
int ret = -ENODEV;
- u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
+ u8 data_altsetting = 1;
struct cdc_mbim_state *info = (void *)&dev->data;
- /* Probably NCM, defer for cdc_ncm_bind */
+	/* should we change the control altsetting on an NCM/MBIM function? */
+ if (cdc_ncm_select_altsetting(intf) == CDC_NCM_COMM_ALTSETTING_MBIM) {
+ data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+ ret = cdc_mbim_set_ctrlalt(dev, intf, CDC_NCM_COMM_ALTSETTING_MBIM);
+ if (ret)
+ goto err;
+ ret = -ENODEV;
+ }
+
+ /* we will hit this for NCM/MBIM functions if prefer_mbim is false */
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
@@ -101,7 +184,10 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_NOARP;
/* no need to put the VLAN tci in the packet headers */
- dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ /* monitor VLAN additions and removals */
+ dev->net->netdev_ops = &cdc_mbim_netdev_ops;
err:
return ret;
}
@@ -164,12 +250,24 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
skb_pull(skb, ETH_HLEN);
}
+ /* Is IP session <0> tagged too? */
+ if (info->flags & FLAG_IPS0_VLAN) {
+ /* drop all untagged packets */
+ if (!tci)
+ goto error;
+ /* map MBIM_IPS0_VID to IPS<0> */
+ if (tci == MBIM_IPS0_VID)
+ tci = 0;
+ }
+
/* mapping VLANs to MBIM sessions:
- * no tag => IPS session <0>
+ * no tag => IPS session <0> if !FLAG_IPS0_VLAN
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
- * 512 - 4095 => unsupported, drop
+ * 512 - 4093 => unsupported, drop
+ * 4094 => IPS session <0> if FLAG_IPS0_VLAN
*/
+
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
if (!is_ip)
@@ -178,6 +276,8 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
c[3] = tci;
break;
case 0x0100: /* VLAN ID 256 - 511 */
+ if (is_ip)
+ goto error;
sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
c = (u8 *)&sign;
c[3] = tci;
@@ -223,8 +323,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
/* need to send the NA on the VLAN dev, if any */
rcu_read_lock();
if (tci) {
- netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
- tci);
+ netdev = __vlan_find_dev_deep_rcu(dev->net, htons(ETH_P_8021Q),
+ tci);
if (!netdev) {
rcu_read_unlock();
return;
@@ -268,7 +368,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
__be16 proto = htons(ETH_P_802_3);
struct sk_buff *skb = NULL;
- if (tci < 256) { /* IPS session? */
+ if (tci < 256 || tci == MBIM_IPS0_VID) { /* IPS session? */
if (len < sizeof(struct iphdr))
goto err;
@@ -320,6 +420,7 @@ static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
struct usb_cdc_ncm_dpe16 *dpe16;
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
+ u32 payload = 0;
u8 *c;
u16 tci;
@@ -338,6 +439,9 @@ next_ndp:
case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
c = (u8 *)&ndp16->dwSignature;
tci = c[3];
+ /* tag IPS<0> packets too if MBIM_IPS0_VID exists */
+ if (!tci && info->flags & FLAG_IPS0_VLAN)
+ tci = MBIM_IPS0_VID;
break;
case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
c = (u8 *)&ndp16->dwSignature;
@@ -379,6 +483,7 @@ next_ndp:
if (!skb)
goto error;
usbnet_skb_return(dev, skb);
+ payload += len; /* count payload bytes in this NTB */
}
}
err_ndp:
@@ -387,6 +492,10 @@ err_ndp:
if (ndpoffset && loopcount--)
goto next_ndp;
+ /* update stats */
+ ctx->rx_overhead += skb_in->len - payload;
+ ctx->rx_ntbs++;
+
return 1;
error:
return 0;
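The VLAN-to-session mapping spelled out in the tx_fixup comment above reduces to a small pure function. A hypothetical user-space helper (tci_to_session is not part of the driver) illustrating the rules, including the IPS0 tagging special cases:

	#include <stdbool.h>
	#include <stdio.h>

	/* returns 0 and fills *is_dss / *session on success, -1 for a drop */
	static int tci_to_session(unsigned int tci, bool ips0_tagged,
				  bool *is_dss, unsigned int *session)
	{
		if (tci == 4094) {		/* MBIM_IPS0_VID */
			if (!ips0_tagged)
				return -1;	/* only valid when IPS0 is tagged */
			tci = 0;
		} else if (tci == 0 && ips0_tagged) {
			return -1;		/* untagged dropped when IPS0 is tagged */
		}

		if (tci < 256) {		/* IPS session <tci> */
			*is_dss = false;
			*session = tci;
		} else if (tci < 512) {		/* DSS session <tci - 256> */
			*is_dss = true;
			*session = tci - 256;
		} else {
			return -1;		/* 512..4093 unsupported */
		}
		return 0;
	}

	int main(void)
	{
		bool dss;
		unsigned int session;

		if (!tci_to_session(300, false, &dss, &session))
			printf("dss=%d session=%u\n", dss, session); /* dss=1 session=44 */
		return 0;
	}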
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9a2bd11943eb..80a844e0ae03 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -65,19 +65,384 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
static struct usb_driver cdc_ncm_driver;
-static int cdc_ncm_setup(struct usbnet *dev)
+struct cdc_ncm_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define CDC_NCM_STAT(str, m) { \
+ .stat_string = str, \
+ .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
+ .stat_offset = offsetof(struct cdc_ncm_ctx, m) }
+#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
+
+static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
+ CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
+ CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
+ CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
+ CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
+ CDC_NCM_SIMPLE_STAT(tx_overhead),
+ CDC_NCM_SIMPLE_STAT(tx_ntbs),
+ CDC_NCM_SIMPLE_STAT(rx_overhead),
+ CDC_NCM_SIMPLE_STAT(rx_ntbs),
+};
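The CDC_NCM_STAT/CDC_NCM_SIMPLE_STAT macros build a name/size/offset table so the ethtool callbacks can walk struct cdc_ncm_ctx generically. A minimal user-space sketch of the same stringify/offsetof pattern (stats and table are made-up names):

	#include <stddef.h>
	#include <stdio.h>

	struct stats { unsigned long tx_ntbs; unsigned long rx_ntbs; };

	#define STAT(m) { #m, sizeof(((struct stats *)0)->m), \
			  offsetof(struct stats, m) }

	static const struct { const char *name; size_t size, off; } table[] = {
		STAT(tx_ntbs),
		STAT(rx_ntbs),
	};

	int main(void)
	{
		struct stats s = { 3, 7 };
		size_t i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			printf("%s = %lu\n", table[i].name,
			       *(unsigned long *)((char *)&s + table[i].off));
		return 0;
	}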
+
+static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(cdc_ncm_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ int i;
+ char *p = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+ p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
+ data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+ memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
+
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .get_link = usbnet_get_link,
+ .nway_reset = usbnet_nway_reset,
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_sset_count = cdc_ncm_get_sset_count,
+ .get_strings = cdc_ncm_get_strings,
+ .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+};
+
+static u32 cdc_ncm_check_rx_max(struct usbnet *dev, u32 new_rx)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 val, max, min;
+
+ /* clamp new_rx to sane values */
+ min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+
+ /* dwNtbInMaxSize spec violation? Use MIN size for both limits */
+ if (max < min) {
+ dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
+ le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
+ max = min;
+ }
+
+ val = clamp_t(u32, new_rx, min, max);
+ if (val != new_rx)
+ dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range\n", min, max);
+
+ return val;
+}
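The clamping rules above can be shown with concrete numbers. A user-space sketch assuming the spec minimum of 2048 (USB_CDC_NCM_NTB_MIN_IN_SIZE) and a 32768-byte cap standing in for CDC_NCM_NTB_MAX_SIZE_RX (check_rx_max is a hypothetical stand-in):

	#include <stdio.h>

	static unsigned int check_rx_max(unsigned int new_rx, unsigned int dev_max)
	{
		unsigned int min = 2048;	/* assumed spec minimum */
		unsigned int max = dev_max < 32768 ? dev_max : 32768;

		if (max < min)	/* device violates the spec: pin both to min */
			max = min;
		if (new_rx < min)
			return min;
		if (new_rx > max)
			return max;
		return new_rx;
	}

	int main(void)
	{
		printf("%u\n", check_rx_max(65536, 16384));	/* 16384 */
		printf("%u\n", check_rx_max(1024, 512));	/* 2048 */
		return 0;
	}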
+
+static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 val, max, min;
+
+ /* clamp new_tx to sane values */
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+
+ /* some devices set dwNtbOutMaxSize too low for the above default */
+ min = min(min, max);
+
+ val = clamp_t(u32, new_tx, min, max);
+ if (val != new_tx)
+ dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range\n", min, max);
+
+ return val;
+}
+
+static ssize_t cdc_ncm_show_min_tx_pkt(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->min_tx_pkt);
+}
+
+static ssize_t cdc_ncm_show_rx_max(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->rx_max);
+}
+
+static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->tx_max);
+}
+
+static ssize_t cdc_ncm_show_tx_timer_usecs(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC);
+}
+
+static ssize_t cdc_ncm_store_min_tx_pkt(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ unsigned long val;
+
+ /* no need to restrict values - anything from 0 to infinity is OK */
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ ctx->min_tx_pkt = val;
+ return len;
+}
+
+static ssize_t cdc_ncm_store_rx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || cdc_ncm_check_rx_max(dev, val) != val)
+ return -EINVAL;
+
+ cdc_ncm_update_rxtx_max(dev, val, ctx->tx_max);
+ return len;
+}
+
+static ssize_t cdc_ncm_store_tx_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) || cdc_ncm_check_tx_max(dev, val) != val)
+ return -EINVAL;
+
+ cdc_ncm_update_rxtx_max(dev, ctx->rx_max, val);
+ return len;
+}
+
+static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(d));
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ ssize_t ret;
+ unsigned long val;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val && (val < CDC_NCM_TIMER_INTERVAL_MIN || val > CDC_NCM_TIMER_INTERVAL_MAX))
+ return -EINVAL;
+
+ spin_lock_bh(&ctx->mtx);
+ ctx->timer_interval = val * NSEC_PER_USEC;
+ if (!ctx->timer_interval)
+ ctx->tx_timer_pending = 0;
+ spin_unlock_bh(&ctx->mtx);
+ return len;
+}
+
+static DEVICE_ATTR(min_tx_pkt, S_IRUGO | S_IWUSR, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
+static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
+static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
+static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
+
+#define NCM_PARM_ATTR(name, format, tocpu) \
+static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *attr, char *buf) \
+{ \
+ struct usbnet *dev = netdev_priv(to_net_dev(d)); \
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
+ return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, cdc_ncm_show_##name, NULL)
+
+NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu);
+NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu);
+NCM_PARM_ATTR(wNdpInDivisor, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpInPayloadRemainder, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpInAlignment, "%u", le16_to_cpu);
+NCM_PARM_ATTR(dwNtbOutMaxSize, "%u", le32_to_cpu);
+NCM_PARM_ATTR(wNdpOutDivisor, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpOutPayloadRemainder, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNdpOutAlignment, "%u", le16_to_cpu);
+NCM_PARM_ATTR(wNtbOutMaxDatagrams, "%u", le16_to_cpu);
+
+static struct attribute *cdc_ncm_sysfs_attrs[] = {
+ &dev_attr_min_tx_pkt.attr,
+ &dev_attr_rx_max.attr,
+ &dev_attr_tx_max.attr,
+ &dev_attr_tx_timer_usecs.attr,
+ &dev_attr_bmNtbFormatsSupported.attr,
+ &dev_attr_dwNtbInMaxSize.attr,
+ &dev_attr_wNdpInDivisor.attr,
+ &dev_attr_wNdpInPayloadRemainder.attr,
+ &dev_attr_wNdpInAlignment.attr,
+ &dev_attr_dwNtbOutMaxSize.attr,
+ &dev_attr_wNdpOutDivisor.attr,
+ &dev_attr_wNdpOutPayloadRemainder.attr,
+ &dev_attr_wNdpOutAlignment.attr,
+ &dev_attr_wNtbOutMaxDatagrams.attr,
+ NULL,
+};
+
+static struct attribute_group cdc_ncm_sysfs_attr_group = {
+ .name = "cdc_ncm",
+ .attrs = cdc_ncm_sysfs_attrs,
+};
+
+/* handle rx_max and tx_max changes */
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
u32 val;
- u8 flags;
- u8 iface_no;
- int err;
- int eth_hlen;
- u16 mbim_mtu;
- u16 ntb_fmt_supported;
- __le16 max_datagram_size;
- iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+ val = cdc_ncm_check_rx_max(dev, new_rx);
+
+ /* inform device about NTB input size changes */
+ if (val != ctx->rx_max) {
+ __le32 dwNtbInMaxSize = cpu_to_le32(val);
+
+ dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
+
+ /* tell device to use new size */
+ if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &dwNtbInMaxSize, 4) < 0)
+ dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+ else
+ ctx->rx_max = val;
+ }
+
+	/* usbnet uses these values for sizing rx queues */
+ if (dev->rx_urb_size != ctx->rx_max) {
+ dev->rx_urb_size = ctx->rx_max;
+ if (netif_running(dev->net))
+ usbnet_unlink_rx_urbs(dev);
+ }
+
+ val = cdc_ncm_check_tx_max(dev, new_tx);
+ if (val != ctx->tx_max)
+ dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
+
+ /* Adding a pad byte here if necessary simplifies the handling
+ * in cdc_ncm_fill_tx_frame, making tx_max always represent
+ * the real skb max size.
+ *
+ * We cannot use dev->maxpacket here because this is called from
+ * .bind which is called before usbnet sets up dev->maxpacket
+ */
+ if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+ val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ val++;
+
+ /* we might need to flush any pending tx buffers if running */
+ if (netif_running(dev->net) && val > ctx->tx_max) {
+ netif_tx_lock_bh(dev->net);
+ usbnet_start_xmit(NULL, dev->net);
+ /* make sure tx_curr_skb is reallocated if it was empty */
+ if (ctx->tx_curr_skb) {
+ dev_kfree_skb_any(ctx->tx_curr_skb);
+ ctx->tx_curr_skb = NULL;
+ }
+ ctx->tx_max = val;
+ netif_tx_unlock_bh(dev->net);
+ } else {
+ ctx->tx_max = val;
+ }
+
+ dev->hard_mtu = ctx->tx_max;
+
+	/* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
+ /* never pad more than 3 full USB packets per transfer */
+ ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+ CDC_NCM_MIN_TX_PKT, ctx->tx_max);
+}
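The single pad byte added above is worth a worked example: if tx_max were an exact multiple of the bulk endpoint's wMaxPacketSize, a full NTB would end on a packet boundary and the device would expect a zero-length packet to terminate the transfer. A sketch with assumed sizes (16384-byte NTB, 512-byte high-speed bulk packets):

	#include <stdio.h>

	int main(void)
	{
		unsigned int tx_max = 16384;	/* assumed dwNtbOutMaxSize */
		unsigned int maxpacket = 512;	/* assumed HS bulk wMaxPacketSize */

		if (tx_max % maxpacket == 0)	/* full NTB would need a ZLP */
			tx_max++;		/* 16385: last USB packet is short */

		printf("tx_max = %u\n", tx_max);
		return 0;
	}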
+
+/* helpers for NCM and MBIM differences */
+static u8 cdc_ncm_flags(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+ return ctx->mbim_desc->bmNetworkCapabilities;
+ if (ctx->func_desc)
+ return ctx->func_desc->bmNetworkCapabilities;
+ return 0;
+}
+
+static int cdc_ncm_eth_hlen(struct usbnet *dev)
+{
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+ return 0;
+ return ETH_HLEN;
+}
+
+static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
+{
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+ return CDC_MBIM_MIN_DATAGRAM_SIZE;
+ return CDC_NCM_MIN_DATAGRAM_SIZE;
+}
+
+static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+ return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+ if (ctx->ether_desc)
+ return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+ return CDC_NCM_MAX_DATAGRAM_SIZE;
+}
+
+/* initial one-time device setup. MUST be called with the data interface
+ * in altsetting 0
+ */
+static int cdc_ncm_init(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+ int err;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
@@ -89,7 +454,36 @@ static int cdc_ncm_setup(struct usbnet *dev)
return err; /* GET_NTB_PARAMETERS is required */
}
- /* read correct set of parameters according to device mode */
+ /* set CRC Mode */
+ if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
+ dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0);
+ if (err < 0)
+ dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
+ }
+
+ /* set NTB format, if both formats are supported.
+ *
+ * "The host shall only send this command while the NCM Data
+ * Interface is in alternate setting 0."
+ */
+ if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
+ USB_CDC_NCM_NTB32_SUPPORTED) {
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+ if (err < 0)
+ dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+ }
+
+ /* set initial device values */
ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
@@ -97,72 +491,79 @@ static int cdc_ncm_setup(struct usbnet *dev)
ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
-
- /* there are some minor differences in NCM and MBIM defaults */
- if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
- if (!ctx->mbim_desc)
- return -EINVAL;
- eth_hlen = 0;
- flags = ctx->mbim_desc->bmNetworkCapabilities;
- ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
- if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
- } else {
- if (!ctx->func_desc)
- return -EINVAL;
- eth_hlen = ETH_HLEN;
- flags = ctx->func_desc->bmNetworkCapabilities;
- ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
- }
-
- /* common absolute max for NCM and MBIM */
- if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
- ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
dev_dbg(&dev->intf->dev,
"dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
- ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
/* max count of tx datagrams */
if ((ctx->tx_max_datagrams == 0) ||
(ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
- /* verify maximum size of received NTB in bytes */
- if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
- dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
- USB_CDC_NCM_NTB_MIN_IN_SIZE);
- ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
- }
+ /* set up maximum NDP size */
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
- if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
- dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_RX);
- ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
- }
+ /* initial coalescing timer interval */
+ ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
- /* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
- __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+ return 0;
+}
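The max_ndp_size formula above reserves one entry beyond tx_max_datagrams because an NDP16 is terminated by a NULL datagram pointer entry. A worked example with assumed sizes (8-byte usb_cdc_ncm_ndp16 header, 4-byte usb_cdc_ncm_dpe16 entries, 40 datagrams):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ndp16_hdr = 8;	/* assumed sizeof(struct usb_cdc_ncm_ndp16) */
		unsigned int dpe16 = 4;		/* assumed sizeof(struct usb_cdc_ncm_dpe16) */
		unsigned int tx_max_datagrams = 40;

		/* the "+ 1" reserves the terminating NULL entry */
		printf("%u\n", ndp16_hdr + (tx_max_datagrams + 1) * dpe16); /* 172 */
		return 0;
	}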
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- 0, iface_no, &dwNtbInMaxSize, 4);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+/* set a new max datagram size */
+static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+ __le16 max_datagram_size;
+ u16 mbim_mtu;
+ int err;
+
+ /* set default based on descriptors */
+ ctx->max_datagram_size = clamp_t(u32, new_size,
+ cdc_ncm_min_dgram_size(dev),
+ CDC_NCM_MAX_DATAGRAM_SIZE);
+
+ /* inform the device about the selected Max Datagram Size? */
+ if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+ goto out;
+
+ /* read current mtu value from device */
+ err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0) {
+ dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ goto out;
}
- /* verify maximum size of transmitted NTB in bytes */
- if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
- dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
- CDC_NCM_NTB_MAX_SIZE_TX);
- ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+ if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+ goto out;
+
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+ err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ 0, iface_no, &max_datagram_size, 2);
+ if (err < 0)
+ dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
+out:
+ /* set MTU to max supported by the device if necessary */
+ dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
+
+	/* do not exceed operator preferred MTU */
+ if (ctx->mbim_extended_desc) {
+ mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+ if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+ dev->net->mtu = mbim_mtu;
}
+}
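The clamp at the top of cdc_ncm_set_dgram_size() bounds any requested size before the device is consulted. A user-space sketch with assumed kernel constants (1514 for CDC_NCM_MIN_DATAGRAM_SIZE, 8192 for CDC_NCM_MAX_DATAGRAM_SIZE):

	#include <stdio.h>

	static unsigned int clamp_u32(unsigned int v, unsigned int lo, unsigned int hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	int main(void)
	{
		unsigned int min = 1514;	/* assumed NCM minimum */
		unsigned int max = 8192;	/* assumed common maximum */

		printf("%u\n", clamp_u32(9000, min, max));	/* 8192 */
		printf("%u\n", clamp_u32(512, min, max));	/* 1514 */
		return 0;
	}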
+
+static void cdc_ncm_fix_modulus(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 val;
/*
* verify that the structure alignment is:
@@ -199,68 +600,31 @@ static int cdc_ncm_setup(struct usbnet *dev)
}
/* adjust TX-remainder according to NCM specification. */
- ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+ ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
(ctx->tx_modulus - 1));
+}
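Folding the Ethernet header length into the device-supplied remainder, as done above, places the start of each datagram so that the byte ETH_HLEN past it still lands on the alignment the device asked for. A worked example with assumed values (modulus 4, device remainder 2):

	#include <stdio.h>

	int main(void)
	{
		unsigned int tx_modulus = 4, tx_remainder = 2, eth_hlen = 14;

		/* unsigned wrap-around makes this a modular subtraction */
		tx_remainder = (tx_remainder - eth_hlen) & (tx_modulus - 1);
		printf("%u\n", tx_remainder);	/* 0, i.e. (2 - 14) mod 4 */
		return 0;
	}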
- /* additional configuration */
-
- /* set CRC Mode */
- if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_CRC_NOT_APPENDED,
- iface_no, NULL, 0);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
- }
-
- /* set NTB format, if both formats are supported */
- if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
- }
-
- /* inform the device about the selected Max Datagram Size */
- if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
- goto out;
-
- /* read current mtu value from device */
- err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
- dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
- goto out;
- }
-
- if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
- goto out;
+static int cdc_ncm_setup(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u32 def_rx, def_tx;
- max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
- err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
- USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0)
- dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+	/* be conservative when selecting the initial buffer size to
+ * increase the number of hosts this will work for
+ */
+ def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
+ le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+ def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
+ le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
-out:
- /* set MTU to max supported by the device if necessary */
- if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
- dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+ /* clamp rx_max and tx_max and inform device */
+ cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
- /* do not exceed operater preferred MTU */
- if (ctx->mbim_extended_desc) {
- mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
- if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
- dev->net->mtu = mbim_mtu;
- }
+ /* sanitize the modulus and remainder values */
+ cdc_ncm_fix_modulus(dev);
+ /* set max datagram size */
+ cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
return 0;
}
@@ -424,10 +788,21 @@ advance:
}
/* check if we got everything */
- if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
- dev_dbg(&intf->dev, "CDC descriptors missing\n");
+ if (!ctx->data) {
+ dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
goto error;
}
+ if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
+ if (!ctx->mbim_desc) {
+ dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
+ goto error;
+ }
+ } else {
+ if (!ctx->ether_desc || !ctx->func_desc) {
+ dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
+ goto error;
+ }
+ }
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
@@ -447,8 +822,8 @@ advance:
goto error2;
}
- /* initialize data interface */
- if (cdc_ncm_setup(dev))
+ /* initialize basic device settings */
+ if (cdc_ncm_init(dev))
goto error2;
/* configure data interface */
@@ -477,18 +852,14 @@ advance:
dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
}
- /* usbnet use these values for sizing tx/rx queues */
- dev->hard_mtu = ctx->tx_max;
- dev->rx_urb_size = ctx->rx_max;
+ /* finish setting up the device specific data */
+ cdc_ncm_setup(dev);
- /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
- * outside the sane range. Adding a pad byte here if necessary
- * simplifies the handling in cdc_ncm_fill_tx_frame, making
- * tx_max always represent the real skb max size.
- */
- if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
- ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
- ctx->tx_max++;
+ /* override ethtool_ops */
+ dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+
+ /* add our sysfs attrs */
+ dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
return 0;
@@ -541,10 +912,10 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
-/* Select the MBIM altsetting iff it is preferred and available,
- * returning the number of the corresponding data interface altsetting
+/* Return the number of the MBIM control interface altsetting iff it
+ * is preferred and available.
*/
-u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
{
struct usb_host_interface *alt;
@@ -563,15 +934,15 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
* the rules given in section 6 (USB Device Model) of this
* specification."
*/
- if (prefer_mbim && intf->num_altsetting == 2) {
+ if (intf->num_altsetting < 2)
+ return intf->cur_altsetting->desc.bAlternateSetting;
+
+ if (prefer_mbim) {
alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
- if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
- !usb_set_interface(dev->udev,
- intf->cur_altsetting->desc.bInterfaceNumber,
- CDC_NCM_COMM_ALTSETTING_MBIM))
- return CDC_NCM_DATA_ALTSETTING_MBIM;
+ if (alt && cdc_ncm_comm_intf_is_mbim(alt))
+ return CDC_NCM_COMM_ALTSETTING_MBIM;
}
- return CDC_NCM_DATA_ALTSETTING_NCM;
+ return CDC_NCM_COMM_ALTSETTING_NCM;
}
EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
@@ -580,12 +951,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
/* MBIM backwards compatible function? */
- cdc_ncm_select_altsetting(dev, intf);
- if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+ if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
return -ENODEV;
- /* NCM data altsetting is always 1 */
- ret = cdc_ncm_bind_common(dev, intf, 1);
+ /* The NCM data altsetting is fixed */
+ ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
/*
* We should get an event when network connection is "connected" or
@@ -628,7 +998,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
/* verify that there is room for the NDP and the datagram (reserve) */
- if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+ if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
return NULL;
/* link to it */
@@ -638,7 +1008,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */
- ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+ ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
ndp16->dwSignature = sign;
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
return ndp16;
@@ -683,6 +1053,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
+
+		/* running payload counter for this skb_out */
+ ctx->tx_curr_frame_payload = 0;
}
for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
@@ -720,6 +1093,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
ctx->tx_rem_sign = sign;
skb = NULL;
ready2send = 1;
+ ctx->tx_reason_ntb_full++; /* count reason for transmitting */
}
break;
}
@@ -733,12 +1107,14 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
+ ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
skb = NULL;
/* send now if this NDP is full */
if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
ready2send = 1;
+ ctx->tx_reason_ndp_full++; /* count reason for transmitting */
break;
}
}
@@ -758,7 +1134,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
ctx->tx_curr_skb = skb_out;
goto exit_no_skb;
- } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
+ } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
/* wait for more frames */
/* push variables */
ctx->tx_curr_skb = skb_out;
@@ -768,11 +1144,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
goto exit_no_skb;
} else {
+ if (n == ctx->tx_max_datagrams)
+ ctx->tx_reason_max_datagram++; /* count reason for transmitting */
/* frame goes out */
/* variables will be reset at next call */
}
- /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT
+	/* If the collected data size is less than or equal to ctx->min_tx_pkt
* bytes, we send buffers as it is. If we get more data, it
* would be more efficient for USB HS mobile device with DMA
* engine to receive a full size NTB, than canceling DMA
@@ -782,7 +1160,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* a ZLP after full sized NTBs.
*/
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
- skb_out->len > CDC_NCM_MIN_TX_PKT)
+ skb_out->len > ctx->min_tx_pkt)
memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
ctx->tx_max - skb_out->len);
else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
@@ -795,11 +1173,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+ /* keep private stats: framing overhead and number of NTBs */
+ ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
+ ctx->tx_ntbs++;
+
+ /* usbnet has already counted all the framing overhead.
+	 * Adjust the stats so that the tx_bytes counter shows real
+ * payload data instead.
+ */
+ dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+
return skb_out;
exit_no_skb:
- /* Start timer, if there is a remaining skb */
- if (ctx->tx_curr_skb != NULL)
+ /* Start timer, if there is a remaining non-empty skb */
+ if (ctx->tx_curr_skb != NULL && n > 0)
cdc_ncm_tx_timeout_start(ctx);
return NULL;
}
@@ -810,7 +1199,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
/* start timer, if not already started */
if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
hrtimer_start(&ctx->tx_timer,
- ktime_set(0, CDC_NCM_TIMER_INTERVAL),
+ ktime_set(0, ctx->timer_interval),
HRTIMER_MODE_REL);
}
@@ -835,6 +1224,7 @@ static void cdc_ncm_txpath_bh(unsigned long param)
cdc_ncm_tx_timeout_start(ctx);
spin_unlock_bh(&ctx->mtx);
} else if (dev->net != NULL) {
+ ctx->tx_reason_timeout++; /* count reason for transmitting */
spin_unlock_bh(&ctx->mtx);
netif_tx_lock_bh(dev->net);
usbnet_start_xmit(NULL, dev->net);
@@ -970,6 +1360,7 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
struct usb_cdc_ncm_dpe16 *dpe16;
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
+ u32 payload = 0;
ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
if (ndpoffset < 0)
@@ -1015,13 +1406,13 @@ next_ndp:
break;
} else {
- skb = skb_clone(skb_in, GFP_ATOMIC);
+ /* create a fresh copy to reduce truesize */
+ skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!skb)
goto error;
- skb->len = len;
- skb->data = ((u8 *)skb_in->data) + offset;
- skb_set_tail_pointer(skb, len);
+ memcpy(skb_put(skb, len), skb_in->data + offset, len);
usbnet_skb_return(dev, skb);
+ payload += len; /* count payload bytes in this NTB */
}
}
err_ndp:
@@ -1030,6 +1421,10 @@ err_ndp:
if (ndpoffset && loopcount--)
goto next_ndp;
+ /* update stats */
+ ctx->rx_overhead += skb_in->len - payload;
+ ctx->rx_ntbs++;
+
return 1;
error:
return 0;
@@ -1049,14 +1444,14 @@ cdc_ncm_speed_change(struct usbnet *dev,
*/
if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
netif_info(dev, link, dev->net,
- "%u mbit/s downlink %u mbit/s uplink\n",
- (unsigned int)(rx_speed / 1000000U),
- (unsigned int)(tx_speed / 1000000U));
+ "%u mbit/s downlink %u mbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
} else {
netif_info(dev, link, dev->net,
- "%u kbit/s downlink %u kbit/s uplink\n",
- (unsigned int)(rx_speed / 1000U),
- (unsigned int)(tx_speed / 1000U));
+ "%u kbit/s downlink %u kbit/s uplink\n",
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
}
}
@@ -1086,11 +1481,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- ctx->connected = le16_to_cpu(event->wValue);
netif_info(dev, link, dev->net,
"network connection: %sconnected\n",
- ctx->connected ? "" : "dis");
- usbnet_link_change(dev, ctx->connected, 0);
+ !!event->wValue ? "" : "dis");
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1110,23 +1504,11 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
}
}
-static int cdc_ncm_check_connect(struct usbnet *dev)
-{
- struct cdc_ncm_ctx *ctx;
-
- ctx = (struct cdc_ncm_ctx *)dev->data[0];
- if (ctx == NULL)
- return 1; /* disconnected */
-
- return !ctx->connected;
-}
-
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
- .check_connect = cdc_ncm_check_connect,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
@@ -1140,7 +1522,6 @@ static const struct driver_info wwan_info = {
| FLAG_WWAN,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
- .check_connect = cdc_ncm_check_connect,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
@@ -1154,7 +1535,6 @@ static const struct driver_info wwan_noarp_info = {
| FLAG_WWAN | FLAG_NOARP,
.bind = cdc_ncm_bind,
.unbind = cdc_ncm_unbind,
- .check_connect = cdc_ncm_check_connect,
.manage_power = usbnet_manage_power,
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 660bd5ea9fc0..a3a05869309d 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2425,7 +2425,7 @@ static void hso_net_init(struct net_device *net)
net->type = ARPHRD_NONE;
net->mtu = DEFAULT_MTU - 14;
net->tx_queue_len = 10;
- SET_ETHTOOL_OPS(net, &ops);
+ net->ethtool_ops = &ops;
/* and initialize the semaphore */
spin_lock_init(&hso_net->net_lock);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 312178d7b698..f9822bc75425 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -172,24 +172,11 @@ err:
return ret;
}
-static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
-{
- struct cdc_ncm_ctx *ctx;
-
- ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
-
- if (ctx == NULL)
- return 1; /* disconnected */
-
- return !ctx->connected;
-}
-
static const struct driver_info huawei_cdc_ncm_info = {
.description = "Huawei CDC NCM device",
.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
.bind = huawei_cdc_ncm_bind,
.unbind = huawei_cdc_ncm_unbind,
- .check_connect = huawei_cdc_ncm_check_connect,
.manage_power = huawei_cdc_ncm_manage_power,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 973275fef250..76465b117b72 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -534,7 +534,7 @@ static int ipheth_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev);
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
retval = register_netdev(netdev);
if (retval) {
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index a359d3bb7c5b..dcb6d33141e0 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1171,7 +1171,7 @@ err_fw:
netdev->netdev_ops = &kaweth_netdev_ops;
netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
/* kaweth is zeroed as part of alloc_netdev */
INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 03e8a15d7deb..f84080215915 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1159,7 +1159,7 @@ static int pegasus_probe(struct usb_interface *intf,
net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
net->netdev_ops = &pegasus_netdev_ops;
- SET_ETHTOOL_OPS(net, &ops);
+ net->ethtool_ops = &ops;
pegasus->mii.dev = net;
pegasus->mii.mdio_read = mdio_read;
pegasus->mii.mdio_write = mdio_write;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index dc4bf06948c7..cf62d7e8329f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -763,7 +763,12 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
- {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 3fbfb0869030..25431965a625 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -630,12 +630,10 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
int ret;
void *tmp;
- tmp = kmalloc(size, GFP_KERNEL);
+ tmp = kmemdup(data, size, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
- memcpy(tmp, data, size);
-
ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, tmp, size, 500);
@@ -3452,7 +3450,7 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_TSO | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
tp->mii.dev = netdev;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index da2c4583bd2d..6e87e5710048 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
dev->netdev = netdev;
netdev->netdev_ops = &rtl8150_netdev_ops;
netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
- SET_ETHTOOL_OPS(netdev, &ops);
+ netdev->ethtool_ops = &ops;
dev->intr_interval = 100; /* 100ms */
if (!alloc_all_urbs(dev)) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a852b5f215f..7d9f84a91f37 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1646,7 +1646,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
- SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
+ dev->ethtool_ops = &virtnet_ethtool_ops;
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ if (vi->any_header_sg) {
+ if (vi->mergeable_rx_bufs)
+ dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ dev->needed_headroom = sizeof(struct virtio_net_hdr);
+ }
+
/* Use single tx/rx queue pair as default */
vi->curr_queue_pairs = 1;
vi->max_queue_pairs = max_queue_pairs;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 600ab56c0008..40c1c7b0d9e0 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -431,8 +431,8 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
@@ -579,7 +579,7 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
}
static int
-vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
+vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
@@ -592,7 +592,7 @@ vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
}
static int
-vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
+vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key)
{
unsigned int i;
unsigned long flags;
@@ -628,12 +628,12 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_rxnfc = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
- .get_rxfh_indir = vmxnet3_get_rss_indir,
- .set_rxfh_indir = vmxnet3_set_rss_indir,
+ .get_rxfh = vmxnet3_get_rss,
+ .set_rxfh = vmxnet3_set_rss,
#endif
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
+ netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}
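
The vmxnet3 renames track an ethtool core change: the .get_rxfh_indir/.set_rxfh_indir callbacks were folded into .get_rxfh/.set_rxfh, which carry both the RSS indirection table and the hash key. A driver without a configurable key, as here, simply ignores the key argument. Sketch with hypothetical my_* names:

	static int my_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
	{
		struct my_priv *priv = netdev_priv(netdev);
		unsigned int i;

		/* no configurable hash key: leave key untouched */
		for (i = 0; i < priv->indir_size; i++)
			indir[i] = priv->indir_table[i];
		return 0;
	}
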
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 4dbb2ed85b97..1610d51dbb5c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -127,6 +127,7 @@ struct vxlan_dev {
struct list_head next; /* vxlan's per namespace list */
struct vxlan_sock *vn_sock; /* listening socket */
struct net_device *dev;
+ struct net *net; /* netns for packet i/o */
struct vxlan_rdst default_dst; /* default destination */
union vxlan_addr saddr; /* source address */
__be16 dst_port;
@@ -134,7 +135,7 @@ struct vxlan_dev {
__u16 port_max;
__u8 tos; /* TOS override */
__u8 ttl;
- u32 flags; /* VXLAN_F_* below */
+ u32 flags; /* VXLAN_F_* in vxlan.h */
struct work_struct sock_work;
struct work_struct igmp_join;
@@ -149,13 +150,6 @@ struct vxlan_dev {
struct hlist_head fdb_head[FDB_HASH_SIZE];
};
-#define VXLAN_F_LEARN 0x01
-#define VXLAN_F_PROXY 0x02
-#define VXLAN_F_RSC 0x04
-#define VXLAN_F_L2MISS 0x08
-#define VXLAN_F_L3MISS 0x10
-#define VXLAN_F_IPV6 0x20 /* internal flag */
-
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;
@@ -571,6 +565,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
goto out;
}
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+ skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
off_eth = skb_gro_offset(skb);
hlen = off_eth + sizeof(*eh);
@@ -605,6 +600,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
}
skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */
+ skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
pp = ptype->callbacks.gro_receive(head, skb);
out_unlock:
@@ -1203,6 +1199,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
/* Ignore packet loops (and multicast echo) */
@@ -1599,18 +1596,11 @@ __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(vxlan_src_port);
-static int handle_offloads(struct sk_buff *skb)
+static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
+ bool udp_csum)
{
- if (skb_is_gso(skb)) {
- int err = skb_unclone(skb, GFP_ATOMIC);
- if (unlikely(err))
- return err;
-
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- } else if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
-
- return 0;
+ int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ return iptunnel_handle_offloads(skb, udp_csum, type);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1618,7 +1608,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
struct dst_entry *dst, struct sk_buff *skb,
struct net_device *dev, struct in6_addr *saddr,
struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port, __be32 vni)
+ __be16 src_port, __be16 dst_port, __be32 vni,
+ bool xnet)
{
struct ipv6hdr *ip6h;
struct vxlanhdr *vxh;
@@ -1626,12 +1617,11 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
int min_headroom;
int err;
- if (!skb->encapsulation) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk));
+ if (IS_ERR(skb))
+ return -EINVAL;
- skb_scrub_packet(skb, false);
+ skb_scrub_packet(skb, xnet);
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1663,27 +1653,14 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
uh->source = src_port;
uh->len = htons(skb->len);
- uh->check = 0;
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
skb_dst_set(skb, dst);
- if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
- __wsum csum = skb_checksum(skb, 0, skb->len, 0);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
- IPPROTO_UDP, csum);
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
- } else {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- uh->check = ~csum_ipv6_magic(saddr, daddr,
- skb->len, IPPROTO_UDP, 0);
- }
+ udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb,
+ saddr, daddr, skb->len);
__skb_push(skb, sizeof(*ip6h));
skb_reset_network_header(skb);
@@ -1699,10 +1676,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- err = handle_offloads(skb);
- if (err)
- return err;
-
ip6tunnel_xmit(skb, dev);
return 0;
}
@@ -1711,17 +1684,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
int vxlan_xmit_skb(struct vxlan_sock *vs,
struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni)
+ __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
{
struct vxlanhdr *vxh;
struct udphdr *uh;
int min_headroom;
int err;
- if (!skb->encapsulation) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
+ skb = vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx);
+ if (IS_ERR(skb))
+ return -EINVAL;
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1753,14 +1725,12 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
uh->source = src_port;
uh->len = htons(skb->len);
- uh->check = 0;
- err = handle_offloads(skb);
- if (err)
- return err;
+ udp_set_csum(vs->sock->sk->sk_no_check_tx, skb,
+ src, dst, skb->len);
return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
- tos, ttl, df, false);
+ tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
@@ -1853,7 +1823,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
fl4.daddr = dst->sin.sin_addr.s_addr;
fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
- rt = ip_route_output_key(dev_net(dev), &fl4);
+ rt = ip_route_output_key(vxlan->net, &fl4);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
&dst->sin.sin_addr.s_addr);
@@ -1874,7 +1844,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1887,7 +1857,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
fl4.saddr, dst->sin.sin_addr.s_addr,
tos, ttl, df, src_port, dst_port,
- htonl(vni << 8));
+ htonl(vni << 8),
+ !net_eq(vxlan->net, dev_net(vxlan->dev)));
if (err < 0)
goto rt_tx_error;
@@ -1927,7 +1898,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1938,7 +1909,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
dev, &fl6.saddr, &fl6.daddr, 0, ttl,
- src_port, dst_port, htonl(vni << 8));
+ src_port, dst_port, htonl(vni << 8),
+ !net_eq(vxlan->net, dev_net(vxlan->dev)));
#endif
}
@@ -2082,7 +2054,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2090,7 +2062,7 @@ static int vxlan_init(struct net_device *dev)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
+ vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
@@ -2172,8 +2144,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2202,7 +2174,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
struct net_device *lowerdev;
int max_mtu;
- lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+ lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
if (lowerdev == NULL)
return eth_change_mtu(dev, new_mtu);
@@ -2285,7 +2257,6 @@ static void vxlan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
@@ -2401,7 +2372,7 @@ static void vxlan_del_work(struct work_struct *work)
* could be used for both IPv4 and IPv6 communications, but
* users may set bindv6only=1.
*/
-static struct socket *create_v6_sock(struct net *net, __be16 port)
+static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
{
struct sock *sk;
struct socket *sock;
@@ -2438,18 +2409,25 @@ static struct socket *create_v6_sock(struct net *net, __be16 port)
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
+
+ if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
+ udp_set_no_check6_tx(sk, true);
+
+ if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
+ udp_set_no_check6_rx(sk, true);
+
return sock;
}
#else
-static struct socket *create_v6_sock(struct net *net, __be16 port)
+static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
{
return ERR_PTR(-EPFNOSUPPORT);
}
#endif
-static struct socket *create_v4_sock(struct net *net, __be16 port)
+static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
{
struct sock *sk;
struct socket *sock;
@@ -2482,18 +2460,24 @@ static struct socket *create_v4_sock(struct net *net, __be16 port)
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
+
+ if (!(flags & VXLAN_F_UDP_CSUM))
+ sock->sk->sk_no_check_tx = 1;
+
return sock;
}
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data, bool ipv6)
+ vxlan_rcv_t *rcv, void *data,
+ u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
struct socket *sock;
struct sock *sk;
unsigned int h;
+ bool ipv6 = !!(flags & VXLAN_F_IPV6);
vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
@@ -2505,9 +2489,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
INIT_WORK(&vs->del_work, vxlan_del_work);
if (ipv6)
- sock = create_v6_sock(net, port);
+ sock = create_v6_sock(net, port, flags);
else
- sock = create_v4_sock(net, port);
+ sock = create_v4_sock(net, port, flags);
if (IS_ERR(sock)) {
kfree(vs);
return ERR_CAST(sock);
@@ -2545,12 +2529,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
vxlan_rcv_t *rcv, void *data,
- bool no_share, bool ipv6)
+ bool no_share, u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
- vs = vxlan_socket_create(net, port, rcv, data, ipv6);
+ vs = vxlan_socket_create(net, port, rcv, data, flags);
if (!IS_ERR(vs))
return vs;
@@ -2578,12 +2562,12 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
static void vxlan_sock_work(struct work_struct *work)
{
struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
- struct net *net = dev_net(vxlan->dev);
+ struct net *net = vxlan->net;
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
__be16 port = vxlan->dst_port;
struct vxlan_sock *nvs;
- nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6);
+ nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags);
spin_lock(&vn->sock_lock);
if (!IS_ERR(nvs))
vxlan_vs_add_dev(nvs, vxlan);
@@ -2605,6 +2589,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
+ vxlan->net = dev_net(dev);
+
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
@@ -2705,12 +2691,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_VXLAN_PORT])
vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
+ vxlan->flags |= VXLAN_F_UDP_CSUM;
+
+ if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
+ nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
+ vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+
+ if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
+ nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
+ vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+
if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
pr_info("duplicate VNI %u\n", vni);
return -EEXIST;
}
- SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
+ dev->ethtool_ops = &vxlan_ethtool_ops;
/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
@@ -2739,8 +2736,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
@@ -2768,7 +2765,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
- nla_total_size(sizeof(__be16))+ /* IFLA_VXLAN_PORT */
+ nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
0;
}
@@ -2828,7 +2828,13 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->flags & VXLAN_F_L3MISS)) ||
nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
- nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
+ nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
+ !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)))
goto nla_put_failure;
if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
@@ -2905,8 +2911,33 @@ static __net_init int vxlan_init_net(struct net *net)
return 0;
}
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan, *next;
+ struct net_device *dev, *aux;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &vxlan_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+ /* If vxlan->dev is in the same netns, it has already been added
+ * to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(vxlan->dev), net))
+ unregister_netdevice_queue(vxlan->dev, &list);
+ }
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
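
Dropping NETIF_F_NETNS_LOCAL lets vxlan devices move between namespaces, so the new vxlan_exit_net() must catch both devices that live in the dying netns and devices whose packet-i/o netns (vxlan->net) is dying; queueing them all and calling unregister_netdevice_many() once keeps teardown to a single notifier round under rtnl. The generic shape of such a pernet exit hook, with a hypothetical my_link_ops:

	static void __net_exit my_exit_net(struct net *net)
	{
		struct net_device *dev, *aux;
		LIST_HEAD(list);

		rtnl_lock();
		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &my_link_ops)
				unregister_netdevice_queue(dev, &list);
		unregister_netdevice_many(&list); /* one batched teardown */
		rtnl_unlock();
	}
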
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index bcfff0d62de4..93ace042d0aa 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -26,6 +26,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/if.h>
#include <linux/hdlc.h>
#include <asm/io.h>
@@ -678,7 +679,6 @@ static inline void
fst_cpureset(struct fst_card_info *card)
{
unsigned char interrupt_line_register;
- unsigned long j = jiffies + 1;
unsigned int regval;
if (card->family == FST_FAMILY_TXU) {
@@ -696,16 +696,12 @@ fst_cpureset(struct fst_card_info *card)
/*
* We are delaying here to allow the 9054 to reset itself
*/
- j = jiffies + 1;
- while (jiffies < j)
- /* Do nothing */ ;
+ usleep_range(10, 20);
outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
/*
* We are delaying here to allow the 9054 to reload its eeprom
*/
- j = jiffies + 1;
- while (jiffies < j)
- /* Do nothing */ ;
+ usleep_range(10, 20);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
if (pci_write_config_byte
@@ -886,20 +882,18 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
* Receive a frame through the DMA
*/
static inline void
-fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
- dma_addr_t mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
/*
* This routine will setup the DMA and start it
*/
- dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
- (unsigned long) skb, (unsigned long) mem, len);
+ dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
if (card->dmarx_in_progress) {
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
}
- outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
+ outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
@@ -915,20 +909,19 @@ fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
* Send a frame through the DMA
*/
static inline void
-fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
- unsigned char *mem, int len)
+fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
/*
* This routine will setup the DMA and start it.
*/
- dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
+ dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
if (card->dmatx_in_progress) {
dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
}
- outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */
- outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */
+ outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
+ outl(mem, card->pci_conf + DMALADR1); /* to here */
outl(len, card->pci_conf + DMASIZ1); /* for this length */
outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
@@ -1405,9 +1398,7 @@ do_bottom_half_tx(struct fst_card_info *card)
card->dma_len_tx = skb->len;
card->dma_txpos = port->txpos;
fst_tx_dma(card,
- (char *) card->
- tx_dma_handle_card,
- (char *)
+ card->tx_dma_handle_card,
BUF_OFFSET(txBuffer[pi]
[port->txpos][0]),
skb->len);
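
The farsync delay fix replaces a jiffies busy-wait, which spins a CPU for anywhere from zero to a full tick (1-10 ms at typical HZ values), with usleep_range(), which sleeps for roughly the microseconds the PLX 9054 actually needs. usleep_range() lives in <linux/delay.h>, hence the new include, and is only valid in sleepable context. Before/after shape:

	/* before: burn up to one full jiffy busy-waiting */
	unsigned long j = jiffies + 1;
	while (time_before(jiffies, j))
		cpu_relax();

	/* after: hrtimer-backed sleep, bounded to a 10-20us window */
	usleep_range(10, 20);
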
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index de3bbf43fc5a..cdd45fb8a1f6 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1322,10 +1322,6 @@ NOTE: This is rather a useless action right now, as the
static int sdla_change_mtu(struct net_device *dev, int new_mtu)
{
- struct frad_local *flp;
-
- flp = netdev_priv(dev);
-
if (netif_running(dev))
return -EBUSY;
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4a01e5c7fe09..4c417903e9be 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -1061,7 +1061,7 @@ int i2400m_firmware_check(struct i2400m *i2400m)
goto error_bad_major;
}
result = 0;
- if (minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR)
+ if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR)
dev_warn(dev, "untested minor fw version %u.%u.%u\n",
major, minor, branch);
/* Yes, we ignore the branch -- we don't have to track it */
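
The old test `minor < I2400M_HDIv_MINOR_2 && minor > I2400M_HDIv_MINOR` warned for minors strictly inside the tested window, the inverse of the intent; "outside [lo, hi]" is De Morgan's negation of "lo <= v && v <= hi", which is what the fixed condition implements. As a standalone predicate:

	/* true when v falls outside the inclusive window [lo, hi] */
	static bool outside_range(unsigned int lo, unsigned int hi, unsigned int v)
	{
		return v < lo || v > hi; /* == !(lo <= v && v <= hi) */
	}
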
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c34d2fccfac..9c78090e72f8 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
*/
int i2400m_pre_reset(struct i2400m *i2400m)
{
- int result;
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
d_printf(1, dev, "pre-reset shut down\n");
- result = 0;
mutex_lock(&i2400m->init_mutex);
if (i2400m->updown) {
netif_tx_disable(i2400m->wimax_dev.net_dev);
__i2400m_dev_stop(i2400m);
- result = 0;
/* don't set updown to zero -- this way
* post_reset can restore properly */
}
mutex_unlock(&i2400m->init_mutex);
if (i2400m->bus_release)
i2400m->bus_release(i2400m);
- d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
- return result;
+ d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+ return 0;
}
EXPORT_SYMBOL_GPL(i2400m_pre_reset);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b3bfa717d5..d48776e4f343 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -365,15 +365,15 @@ static inline unsigned long at76_get_timeout(struct dfu_status *s)
static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
int manifest_sync_timeout)
{
- u8 *block;
- struct dfu_status dfu_stat_buf;
int ret = 0;
int need_dfu_state = 1;
int is_done = 0;
- u8 dfu_state = 0;
u32 dfu_timeout = 0;
int bsize = 0;
int blockno = 0;
+ struct dfu_status *dfu_stat_buf = NULL;
+ u8 *dfu_state = NULL;
+ u8 *block = NULL;
at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size,
manifest_sync_timeout);
@@ -383,13 +383,28 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
return -EINVAL;
}
+ dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
+ if (!dfu_stat_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
- if (!block)
- return -ENOMEM;
+ if (!block) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ dfu_state = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!dfu_state) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ *dfu_state = 0;
do {
if (need_dfu_state) {
- ret = at76_dfu_get_state(udev, &dfu_state);
+ ret = at76_dfu_get_state(udev, dfu_state);
if (ret < 0) {
dev_err(&udev->dev,
"cannot get DFU state: %d\n", ret);
@@ -398,13 +413,13 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
need_dfu_state = 0;
}
- switch (dfu_state) {
+ switch (*dfu_state) {
case STATE_DFU_DOWNLOAD_SYNC:
at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC");
- ret = at76_dfu_get_status(udev, &dfu_stat_buf);
+ ret = at76_dfu_get_status(udev, dfu_stat_buf);
if (ret >= 0) {
- dfu_state = dfu_stat_buf.state;
- dfu_timeout = at76_get_timeout(&dfu_stat_buf);
+ *dfu_state = dfu_stat_buf->state;
+ dfu_timeout = at76_get_timeout(dfu_stat_buf);
need_dfu_state = 0;
} else
dev_err(&udev->dev,
@@ -447,12 +462,12 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
case STATE_DFU_MANIFEST_SYNC:
at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC");
- ret = at76_dfu_get_status(udev, &dfu_stat_buf);
+ ret = at76_dfu_get_status(udev, dfu_stat_buf);
if (ret < 0)
break;
- dfu_state = dfu_stat_buf.state;
- dfu_timeout = at76_get_timeout(&dfu_stat_buf);
+ *dfu_state = dfu_stat_buf->state;
+ dfu_timeout = at76_get_timeout(dfu_stat_buf);
need_dfu_state = 0;
/* override the timeout from the status response,
@@ -484,14 +499,17 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
break;
default:
- at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", dfu_state);
+ at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", *dfu_state);
ret = -EINVAL;
break;
}
} while (!is_done && (ret >= 0));
exit:
+ kfree(dfu_state);
kfree(block);
+ kfree(dfu_stat_buf);
+
if (ret >= 0)
ret = 0;
@@ -1277,6 +1295,7 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
dev_err(&udev->dev,
"loading %dth firmware block failed: %d\n",
blockno, ret);
+ ret = -EIO;
goto exit;
}
buf += bsize;
@@ -1410,6 +1429,8 @@ static int at76_startup_device(struct at76_priv *priv)
/* remove BSSID from previous run */
memset(priv->bssid, 0, ETH_ALEN);
+ priv->scanning = false;
+
if (at76_set_radio(priv, 1) == 1)
at76_wait_completion(priv, CMD_RADIO_ON);
@@ -1483,6 +1504,52 @@ static void at76_work_submit_rx(struct work_struct *work)
mutex_unlock(&priv->mtx);
}
+/* This is a workaround to make scanning work:
+ * currently mac80211 does not process frames with no frequency
+ * information.
+ * However, during a scan the HW performs a sweep by itself, and we
+ * are unable to know where the radio is actually tuned.
+ * This function does its best to guess that information.
+ * During a scan, if the current frame is a beacon or a probe response,
+ * the channel information is extracted from it.
+ * When not scanning, for other frames, or if for whatever reason we
+ * fail to parse beacons and probe responses, this function returns
+ * priv->channel, which should be correct at least when we are not
+ * scanning.
+ */
+static inline int at76_guess_freq(struct at76_priv *priv)
+{
+ size_t el_off;
+ const u8 *el;
+ int channel = priv->channel;
+ int len = priv->rx_skb->len;
+ struct ieee80211_hdr *hdr = (void *)priv->rx_skb->data;
+
+ if (!priv->scanning)
+ goto exit;
+
+ if (len < 24)
+ goto exit;
+
+ if (ieee80211_is_probe_resp(hdr->frame_control)) {
+ el_off = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ el = ((struct ieee80211_mgmt *)hdr)->u.probe_resp.variable;
+ } else if (ieee80211_is_beacon(hdr->frame_control)) {
+ el_off = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ el = ((struct ieee80211_mgmt *)hdr)->u.beacon.variable;
+ } else {
+ goto exit;
+ }
+ len -= el_off;
+
+ el = cfg80211_find_ie(WLAN_EID_DS_PARAMS, el, len);
+ if (el && el[1] > 0)
+ channel = el[2];
+
+exit:
+ return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+}
+
static void at76_rx_tasklet(unsigned long param)
{
struct urb *urb = (struct urb *)param;
@@ -1523,6 +1590,8 @@ static void at76_rx_tasklet(unsigned long param)
rx_status.signal = buf->rssi;
rx_status.flag |= RX_FLAG_DECRYPTED;
rx_status.flag |= RX_FLAG_IV_STRIPPED;
+ rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.freq = at76_guess_freq(priv);
at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
priv->rx_skb->len, priv->rx_skb->data_len);
@@ -1875,6 +1944,8 @@ static void at76_dwork_hw_scan(struct work_struct *work)
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
+ priv->scanning = false;
+
mutex_unlock(&priv->mtx);
ieee80211_scan_completed(priv->hw, false);
@@ -1929,6 +2000,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw,
goto exit;
}
+ priv->scanning = true;
ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan,
SCAN_POLL_INTERVAL);
@@ -2020,6 +2092,44 @@ static void at76_configure_filter(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &priv->work_set_promisc);
}
+static int at76_set_wep(struct at76_priv *priv)
+{
+ int ret = 0;
+ struct mib_mac_wep *mib_data = &priv->mib_buf.data.wep_mib;
+
+ priv->mib_buf.type = MIB_MAC_WEP;
+ priv->mib_buf.size = sizeof(struct mib_mac_wep);
+ priv->mib_buf.index = 0;
+
+ memset(mib_data, 0, sizeof(*mib_data));
+
+ if (priv->wep_enabled) {
+ if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN)
+ mib_data->encryption_level = 2;
+ else
+ mib_data->encryption_level = 1;
+
+ /* always exclude unencrypted if WEP is active */
+ mib_data->exclude_unencrypted = 1;
+ } else {
+ mib_data->exclude_unencrypted = 0;
+ mib_data->encryption_level = 0;
+ }
+
+ mib_data->privacy_invoked = priv->wep_enabled;
+ mib_data->wep_default_key_id = priv->wep_key_id;
+ memcpy(mib_data->wep_default_keyvalue, priv->wep_keys,
+ sizeof(priv->wep_keys));
+
+ ret = at76_set_mib(priv, &priv->mib_buf);
+
+ if (ret < 0)
+ wiphy_err(priv->hw->wiphy,
+ "set_mib (wep) failed: %d\n", ret);
+
+ return ret;
+}
+
static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -2062,7 +2172,7 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
priv->wep_enabled = 1;
}
- at76_startup_device(priv);
+ at76_set_wep(priv);
mutex_unlock(&priv->mtx);
@@ -2330,16 +2440,22 @@ static int at76_probe(struct usb_interface *interface,
struct usb_device *udev;
int op_mode;
int need_ext_fw = 0;
- struct mib_fw_version fwv;
+ struct mib_fw_version *fwv = NULL;
int board_type = (int)id->driver_info;
udev = usb_get_dev(interface_to_usbdev(interface));
+ fwv = kmalloc(sizeof(*fwv), GFP_KERNEL);
+ if (!fwv) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
/* Load firmware into kernel memory */
fwe = at76_load_firmware(udev, board_type);
if (!fwe) {
ret = -ENOENT;
- goto error;
+ goto exit;
}
op_mode = at76_get_op_mode(udev);
@@ -2353,7 +2469,7 @@ static int at76_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"cannot handle a device in HW_CONFIG_MODE\n");
ret = -EBUSY;
- goto error;
+ goto exit;
}
if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
@@ -2366,10 +2482,10 @@ static int at76_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"error %d downloading internal firmware\n",
ret);
- goto error;
+ goto exit;
}
usb_put_dev(udev);
- return ret;
+ goto exit;
}
/* Internal firmware already inside the device. Get firmware
@@ -2382,8 +2498,8 @@ static int at76_probe(struct usb_interface *interface,
* query the device for the fw version */
if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100)
|| (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) {
- ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
- if (ret < 0 || (fwv.major | fwv.minor) == 0)
+ ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
+ if (ret < 0 || (fwv->major | fwv->minor) == 0)
need_ext_fw = 1;
} else
/* No way to check firmware version, reload to be sure */
@@ -2394,37 +2510,37 @@ static int at76_probe(struct usb_interface *interface,
"downloading external firmware\n");
ret = at76_load_external_fw(udev, fwe);
- if (ret)
- goto error;
+ if (ret < 0)
+ goto exit;
/* Re-check firmware version */
- ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
+ ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
if (ret < 0) {
dev_err(&interface->dev,
"error %d getting firmware version\n", ret);
- goto error;
+ goto exit;
}
}
priv = at76_alloc_new_device(udev);
if (!priv) {
ret = -ENOMEM;
- goto error;
+ goto exit;
}
usb_set_intfdata(interface, priv);
- memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version));
+ memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
priv->board_type = board_type;
ret = at76_init_new_device(priv, interface);
if (ret < 0)
at76_delete_device(priv);
- return ret;
-
-error:
- usb_put_dev(udev);
+exit:
+ kfree(fwv);
+ if (ret < 0)
+ usb_put_dev(udev);
return ret;
}
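
The at76 rework moves the DFU status/state scratch variables from the stack to kmalloc'd memory because they are handed to USB transfer routines, which DMA-map the buffer; on-stack memory is not guaranteed to be DMA-able. A minimal sketch of the rule (standard DFU request constants, simplified error handling):

	struct dfu_status *st = kmalloc(sizeof(*st), GFP_KERNEL);
	int ret;

	if (!st)
		return -ENOMEM;
	/* usb_control_msg() DMA-maps this buffer; a stack-allocated
	 * struct dfu_status here would be unsafe. */
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS,
			      USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			      0, 0, st, sizeof(*st), 1000);
	kfree(st);
	return ret;
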
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index f14a65473fe8..55090a38ac95 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -219,18 +219,6 @@ struct at76_req_join {
u8 reserved;
} __packed;
-struct set_mib_buffer {
- u8 type;
- u8 size;
- u8 index;
- u8 reserved;
- union {
- u8 byte;
- __le16 word;
- u8 addr[ETH_ALEN];
- } data;
-} __packed;
-
struct mib_local {
u16 reserved0;
u8 beacon_enable;
@@ -334,6 +322,19 @@ struct mib_mdomain {
u8 channel_list[14]; /* 0 for invalid channels */
} __packed;
+struct set_mib_buffer {
+ u8 type;
+ u8 size;
+ u8 index;
+ u8 reserved;
+ union {
+ u8 byte;
+ __le16 word;
+ u8 addr[ETH_ALEN];
+ struct mib_mac_wep wep_mib;
+ } data;
+} __packed;
+
struct at76_fw_header {
__le32 crc; /* CRC32 of the whole image */
__le32 board_type; /* firmware compatibility code */
@@ -417,6 +418,7 @@ struct at76_priv {
int scan_max_time; /* scan max channel time */
int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
int scan_need_any; /* if set, need to scan for any ESSID */
+ bool scanning; /* if set, the scan is running */
u16 assoc_id; /* current association ID, if associated */
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 507d9a9ee69a..f92050617ae6 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ar5523 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index a1f099628850..17d221abd58c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
return 0;
}
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
struct bmi_cmd cmd;
union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
- address, *param);
+ address, param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
- cmd.execute.param = __cpu_to_le32(*param);
+ cmd.execute.param = __cpu_to_le32(param);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
if (resplen < sizeof(resp.execute)) {
ath10k_warn("invalid execute response length (%d)\n",
resplen);
- return ret;
+ return -EIO;
}
- *param = __le32_to_cpu(resp.execute.result);
+ *result = __le32_to_cpu(resp.execute.result);
+
+ ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
return 0;
}
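
Splitting ath10k_bmi_execute()'s in/out *param into a by-value param and a separate *result makes the data flow explicit at call sites and lets the short-response path fail with -EIO instead of silently returning success with a stale value. Call-site shape after the change (mirrored by the OTP hunk in core.c below):

	u32 result;
	int ret;

	ret = ath10k_bmi_execute(ar, address, 0 /* exec param */, &result);
	if (ret)
		return ret;
	if (result != 0)
		return -EINVAL; /* firmware-reported failure */
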
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 8d81ce1cec21..111ab701465c 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -201,7 +201,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
\
addr = host_interest_item_address(HI_ITEM(item)); \
ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
- *val = __le32_to_cpu(tmp); \
+ if (!ret) \
+ *val = __le32_to_cpu(tmp); \
ret; \
})
@@ -217,7 +218,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
ret; \
})
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a79499c82350..d185dc0cd12b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -329,6 +329,33 @@ exit:
return ret;
}
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *src_ring = pipe->src_ring;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ar_pci->ce_lock);
+
+ /*
+ * This function must be called only if there is an incomplete
+ * scatter-gather transfer (before index register is updated)
+ * that needs to be cleaned up.
+ */
+ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+ return;
+
+ if (WARN_ON_ONCE(src_ring->write_index ==
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+ return;
+
+ src_ring->write_index--;
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
@@ -840,35 +867,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_ring *src_ring;
- unsigned int nentries = attr->src_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
- dma_addr_t base_addr;
- char *ptr;
-
- nentries = roundup_pow_of_two(nentries);
-
- if (ce_state->src_ring) {
- WARN_ON(ce_state->src_ring->nentries != nentries);
- return 0;
- }
-
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
- ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
- src_ring = ce_state->src_ring;
+ nentries = roundup_pow_of_two(attr->src_nentries);
- ptr += sizeof(struct ath10k_ce_ring);
- src_ring->nentries = nentries;
- src_ring->nentries_mask = nentries - 1;
+ memset(src_ring->per_transfer_context, 0,
+ nentries * sizeof(*src_ring->per_transfer_context));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +887,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- src_ring->per_transfer_context = (void **)ptr;
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ memset(dest_ring->per_transfer_context, 0,
+ nentries * sizeof(*dest_ring->per_transfer_context));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
src_ring->base_addr_owner_space_unaligned =
- pci_alloc_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +987,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) {
- pci_free_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- src_ring->base_addr_owner_space,
- src_ring->base_addr_ce_space);
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space,
+ src_ring->base_addr_ce_space);
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
- src_ring->base_addr_ce_space);
- ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
- ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce src ring id %d entries %d base_addr %p\n",
- ce_id, nentries, src_ring->base_addr_owner_space);
-
- return 0;
+ return src_ring;
}
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
- unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
- const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_ring *dest_ring;
- unsigned int nentries = attr->dest_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 nentries;
dma_addr_t base_addr;
- char *ptr;
- nentries = roundup_pow_of_two(nentries);
+ nentries = roundup_pow_of_two(attr->dest_nentries);
- if (ce_state->dest_ring) {
- WARN_ON(ce_state->dest_ring->nentries != nentries);
- return 0;
- }
-
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
- ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
- dest_ring = ce_state->dest_ring;
-
- ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
- dest_ring->sw_index &= dest_ring->nentries_mask;
- dest_ring->write_index =
- ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
- dest_ring->write_index &= dest_ring->nentries_mask;
-
- dest_ring->per_transfer_context = (void **)ptr;
-
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- pci_alloc_consistent(ar_pci->pdev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->dest_ring);
- ce_state->dest_ring = NULL;
- return -ENOMEM;
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1053,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
- dest_ring->base_addr_ce_space);
- ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
- ce_id, nentries, dest_ring->base_addr_owner_space);
-
- return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
- spin_lock_bh(&ar_pci->ce_lock);
-
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
-
- spin_unlock_bh(&ar_pci->ce_lock);
-
- return ce_state;
+ return dest_ring;
}
/*
@@ -1054,11 +1063,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
@@ -1074,64 +1083,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ret = ath10k_pci_wake(ar);
if (ret)
- return NULL;
+ return ret;
- ce_state = ath10k_ce_init_state(ar, ce_id, attr);
- if (!ce_state) {
- ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
- goto out;
- }
+ spin_lock_bh(&ar_pci->ce_lock);
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+ spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
- ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
if (attr->dest_nentries) {
- ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
out:
ath10k_pci_sleep(ar);
- return ce_state;
+ return ret;
}
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+ ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
- struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
- pci_free_consistent(ar_pci->pdev,
- (ce_state->src_ring->nentries *
- sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
}
if (ce_state->dest_ring) {
- pci_free_consistent(ar_pci->pdev,
- (ce_state->dest_ring->nentries *
- sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
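
The CE rework splits the old ath10k_ce_init()/ath10k_ce_deinit() pair into a one-time, sleepable allocation phase (alloc_pipe/free_pipe, now on the generic dma_alloc_coherent()/dma_free_coherent() API against ar->dev rather than the legacy pci_*_consistent wrappers) and a re-runnable hardware-programming phase (init_pipe/deinit_pipe). A warm restart can then reprogram the rings without reallocating them. Intended lifecycle:

	ret = ath10k_ce_alloc_pipe(ar, ce_id, attr);  /* probe: allocate once */
	if (ret)
		return ret;
	ret = ath10k_ce_init_pipe(ar, ce_id, attr);   /* each boot/restart */
	...
	ath10k_ce_deinit_pipe(ar, ce_id);             /* shutdown: quiesce hw */
	ath10k_ce_free_pipe(ar, ce_id);               /* remove: free once */
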
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 8eb7f99ed992..7a5a36fc59c1 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
- void **per_transfer_context;
+ /* keep last */
+ void *per_transfer_context[0];
};
struct ath10k_ce_pipe {
@@ -159,6 +160,8 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int transfer_id,
unsigned int flags);
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
@@ -210,10 +213,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Engine Initialization=======================*/
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
/*
@@ -236,8 +241,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index ebc5fc2ede75..82017f56e661 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -58,36 +58,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
complete(&ar->target_suspend);
}
-static int ath10k_init_connect_htc(struct ath10k *ar)
-{
- int status;
-
- status = ath10k_wmi_connect_htc_service(ar);
- if (status)
- goto conn_fail;
-
- /* Start HTC */
- status = ath10k_htc_start(&ar->htc);
- if (status)
- goto conn_fail;
-
- /* Wait for WMI event to be ready */
- status = ath10k_wmi_wait_for_service_ready(ar);
- if (status <= 0) {
- ath10k_warn("wmi service ready event not received");
- status = -ETIMEDOUT;
- goto timeout;
- }
-
- ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
- return 0;
-
-timeout:
- ath10k_htc_stop(&ar->htc);
-conn_fail:
- return status;
-}
-
static int ath10k_init_configure_target(struct ath10k *ar)
{
u32 param_host;
@@ -249,30 +219,40 @@ exit:
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- u32 address = ar->hw_params.patch_load_addr;
- u32 exec_param;
+ u32 result, address = ar->hw_params.patch_load_addr;
int ret;
/* OTP is optional */
- if (!ar->otp_data || !ar->otp_len)
+ if (!ar->otp_data || !ar->otp_len) {
+ ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ar->otp_data, ar->otp_len);
return 0;
+ }
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->otp_len);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
- goto exit;
+ return ret;
}
- exec_param = 0;
- ret = ath10k_bmi_execute(ar, address, &exec_param);
+ ret = ath10k_bmi_execute(ar, address, 0, &result);
if (ret) {
ath10k_err("could not execute otp (%d)\n", ret);
- goto exit;
+ return ret;
}
-exit:
- return ret;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (result != 0) {
+ ath10k_err("otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +369,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
- ath10k_err("Could not fetch firmware file '%s': %ld\n",
- name, PTR_ERR(ar->firmware));
+ ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
+ ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
@@ -401,14 +381,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
- ath10k_err("firmware image too small to contain magic: %zu\n",
- len);
+ ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
+ ar->hw_params.fw.dir, name, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err("Invalid firmware magic\n");
+ ath10k_err("invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
@@ -430,7 +410,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += sizeof(*hdr);
if (len < ie_len) {
- ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+ ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
@@ -513,8 +493,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
}
if (!ar->firmware_data || !ar->firmware_len) {
- ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
- name);
+ ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
goto err;
}
@@ -531,7 +511,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
- ath10k_err("could not fetch board data (%d)\n", ret);
+ ath10k_err("could not fetch board data '%s/%s' (%d)\n",
+ ar->hw_params.fw.dir, ar->hw_params.fw.board,
+ ret);
goto err;
}
@@ -549,19 +531,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
+ ar->fw_api = 2;
+ ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
- if (ret == 0) {
- ar->fw_api = 2;
- goto out;
- }
+ if (ret == 0)
+ goto success;
+
+ ar->fw_api = 1;
+ ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
- ar->fw_api = 1;
-
-out:
+success:
ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
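
The fetch logic above now sets and logs ar->fw_api before each attempt, trying the newest firmware interface first and falling back one step at a time. A small sketch of that newest-first probe loop, with a hypothetical fetch_api() standing in for the per-API fetch helpers:

    #include <stdio.h>

    /* hypothetical per-API fetch helper: 0 on success */
    static int fetch_api(int api) { return api == 1 ? 0 : -2; }

    int fetch_firmware_files(int *fw_api)
    {
        for (int api = 2; api >= 1; api--) {    /* newest interface first */
            printf("trying fw api %d\n", api);
            if (fetch_api(api) == 0) {
                *fw_api = api;
                printf("using fw api %d\n", api);
                return 0;
            }
        }
        return -1;                              /* no usable firmware found */
    }
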
@@ -572,16 +556,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
int ret;
ret = ath10k_download_board_data(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to download board data: %d\n", ret);
return ret;
+ }
ret = ath10k_download_and_run_otp(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to run otp: %d\n", ret);
return ret;
+ }
ret = ath10k_download_fw(ar);
- if (ret)
+ if (ret) {
+ ath10k_err("failed to download firmware: %d\n", ret);
return ret;
+ }
return ret;
}
@@ -660,8 +650,9 @@ static void ath10k_core_restart(struct work_struct *work)
switch (ar->state) {
case ATH10K_STATE_ON:
- ath10k_halt(ar);
ar->state = ATH10K_STATE_RESTARTING;
+ del_timer_sync(&ar->scan.timeout);
+ ath10k_reset_scan((unsigned long)ar);
ieee80211_restart_hw(ar->hw);
break;
case ATH10K_STATE_OFF:
@@ -670,6 +661,8 @@ static void ath10k_core_restart(struct work_struct *work)
ath10k_warn("cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
+ /* hw restart might be requested from multiple places */
+ break;
case ATH10K_STATE_RESTARTED:
ar->state = ATH10K_STATE_WEDGED;
/* fall through */
@@ -681,70 +674,6 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
}
-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
- const struct ath10k_hif_ops *hif_ops)
-{
- struct ath10k *ar;
-
- ar = ath10k_mac_create();
- if (!ar)
- return NULL;
-
- ar->ath_common.priv = ar;
- ar->ath_common.hw = ar->hw;
-
- ar->p2p = !!ath10k_p2p;
- ar->dev = dev;
-
- ar->hif.priv = hif_priv;
- ar->hif.ops = hif_ops;
-
- init_completion(&ar->scan.started);
- init_completion(&ar->scan.completed);
- init_completion(&ar->scan.on_channel);
- init_completion(&ar->target_suspend);
-
- init_completion(&ar->install_key_done);
- init_completion(&ar->vdev_setup_done);
-
- setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
-
- ar->workqueue = create_singlethread_workqueue("ath10k_wq");
- if (!ar->workqueue)
- goto err_wq;
-
- mutex_init(&ar->conf_mutex);
- spin_lock_init(&ar->data_lock);
-
- INIT_LIST_HEAD(&ar->peers);
- init_waitqueue_head(&ar->peer_mapping_wq);
-
- init_completion(&ar->offchan_tx_completed);
- INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
- skb_queue_head_init(&ar->offchan_tx_queue);
-
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
-
- INIT_WORK(&ar->restart_work, ath10k_core_restart);
-
- return ar;
-
-err_wq:
- ath10k_mac_destroy(ar);
- return NULL;
-}
-EXPORT_SYMBOL(ath10k_core_create);
-
-void ath10k_core_destroy(struct ath10k *ar)
-{
- flush_workqueue(ar->workqueue);
- destroy_workqueue(ar->workqueue);
-
- ath10k_mac_destroy(ar);
-}
-EXPORT_SYMBOL(ath10k_core_destroy);
-
int ath10k_core_start(struct ath10k *ar)
{
int status;
@@ -785,10 +714,28 @@ int ath10k_core_start(struct ath10k *ar)
goto err;
}
+ status = ath10k_htt_init(ar);
+ if (status) {
+ ath10k_err("failed to init htt: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_tx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err("failed to alloc htt tx: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_rx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err("failed to alloc htt rx: %d\n", status);
+ goto err_htt_tx_detach;
+ }
+
status = ath10k_hif_start(ar);
if (status) {
ath10k_err("could not start HIF: %d\n", status);
- goto err_wmi_detach;
+ goto err_htt_rx_detach;
}
status = ath10k_htc_wait_target(&ar->htc);
@@ -797,15 +744,30 @@ int ath10k_core_start(struct ath10k *ar)
goto err_hif_stop;
}
- status = ath10k_htt_attach(ar);
+ status = ath10k_htt_connect(&ar->htt);
if (status) {
- ath10k_err("could not attach htt (%d)\n", status);
+ ath10k_err("failed to connect htt (%d)\n", status);
goto err_hif_stop;
}
- status = ath10k_init_connect_htc(ar);
- if (status)
- goto err_htt_detach;
+ status = ath10k_wmi_connect(ar);
+ if (status) {
+ ath10k_err("could not connect wmi: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_htc_start(&ar->htc);
+ if (status) {
+ ath10k_err("failed to start htc: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_wmi_wait_for_service_ready(ar);
+ if (status <= 0) {
+ ath10k_warn("wmi service ready event not received");
+ status = -ETIMEDOUT;
+ goto err_htc_stop;
+ }
ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
@@ -813,31 +775,36 @@ int ath10k_core_start(struct ath10k *ar)
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
- goto err_disconnect_htc;
+ goto err_htc_stop;
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
ath10k_err("wmi unified ready event not received\n");
status = -ETIMEDOUT;
- goto err_disconnect_htc;
+ goto err_htc_stop;
}
- status = ath10k_htt_attach_target(&ar->htt);
- if (status)
- goto err_disconnect_htc;
+ status = ath10k_htt_setup(&ar->htt);
+ if (status) {
+ ath10k_err("failed to setup htt: %d\n", status);
+ goto err_htc_stop;
+ }
status = ath10k_debug_start(ar);
if (status)
- goto err_disconnect_htc;
+ goto err_htc_stop;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
- ar->hw_params.name, ar->target_version,
- ar->hw->wiphy->fw_version, ar->fw_api,
+ ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+ ar->hw_params.name,
+ ar->target_version,
+ ar->chip_id,
+ ar->hw->wiphy->fw_version,
+ ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
@@ -845,12 +812,14 @@ int ath10k_core_start(struct ath10k *ar)
return 0;
-err_disconnect_htc:
+err_htc_stop:
ath10k_htc_stop(&ar->htc);
-err_htt_detach:
- ath10k_htt_detach(&ar->htt);
err_hif_stop:
ath10k_hif_stop(ar);
+err_htt_rx_detach:
+ ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+ ath10k_htt_tx_free(&ar->htt);
err_wmi_detach:
ath10k_wmi_detach(ar);
err:
@@ -885,10 +854,14 @@ void ath10k_core_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
/* try to suspend target */
- ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+ if (ar->state != ATH10K_STATE_RESTARTING)
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
- ath10k_htt_detach(&ar->htt);
+ ath10k_hif_stop(ar);
+ ath10k_htt_tx_free(&ar->htt);
+ ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
}
EXPORT_SYMBOL(ath10k_core_stop);
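
ath10k_core_start() above now allocates HTT tx/rx before starting HIF and unwinds through a ladder of goto labels in exact reverse order of setup, while ath10k_core_stop() mirrors the same teardown. A compact sketch of the pattern, assuming trivial step()/undo() stand-ins for the real attach/detach calls:

    #include <stdio.h>

    static int step(const char *name) { printf("start %s\n", name); return 0; }
    static void undo(const char *name) { printf("stop %s\n", name); }

    int core_start(void)
    {
        int ret;

        if ((ret = step("wmi")))
            goto err;
        if ((ret = step("htt_tx")))
            goto err_wmi;
        if ((ret = step("htt_rx")))
            goto err_htt_tx;
        if ((ret = step("hif")))
            goto err_htt_rx;
        return 0;

        /* unwind in exact reverse order of the setup above */
    err_htt_rx:
        undo("htt_rx");
    err_htt_tx:
        undo("htt_tx");
    err_wmi:
        undo("wmi");
    err:
        return ret;
    }

    void core_stop(void)        /* mirrors the same reverse order */
    {
        undo("hif");
        undo("htt_rx");
        undo("htt_tx");
        undo("wmi");
    }
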
@@ -980,22 +953,15 @@ static int ath10k_core_check_chip_id(struct ath10k *ar)
return 0;
}
-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+static void ath10k_core_register_work(struct work_struct *work)
{
+ struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
- ar->chip_id = chip_id;
-
- status = ath10k_core_check_chip_id(ar);
- if (status) {
- ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
- return status;
- }
-
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
- return status;
+ goto err;
}
status = ath10k_mac_register(ar);
@@ -1010,18 +976,43 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id)
goto err_unregister_mac;
}
- return 0;
+ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+ return;
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
ath10k_core_free_firmware_files(ar);
- return status;
+err:
+ device_release_driver(ar->dev);
+ return;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+{
+ int status;
+
+ ar->chip_id = chip_id;
+
+ status = ath10k_core_check_chip_id(ar);
+ if (status) {
+ ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ return status;
+ }
+
+ queue_work(ar->workqueue, &ar->register_work);
+
+ return 0;
}
EXPORT_SYMBOL(ath10k_core_register);
void ath10k_core_unregister(struct ath10k *ar)
{
+ cancel_work_sync(&ar->register_work);
+
+ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ return;
+
/* We must unregister from mac80211 before we stop HTC and HIF.
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
@@ -1033,6 +1024,71 @@ void ath10k_core_unregister(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_unregister);
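
Registration now only validates the chip id synchronously and defers the firmware probe to a workqueue, while unregister first cancels the work and bails out unless the CORE_REGISTERED flag was set. A rough userspace analogue, assuming a pthread worker in place of the kernel workqueue:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct dev {
        pthread_t worker;
        atomic_bool registered;
    };

    static void *register_work(void *arg)
    {
        struct dev *d = arg;
        /* the slow firmware probe + mac/debug registration happens here */
        atomic_store(&d->registered, true);
        return NULL;
    }

    int dev_register(struct dev *d)
    {
        /* only cheap validation stays synchronous */
        atomic_store(&d->registered, false);
        return pthread_create(&d->worker, NULL, register_work, d);
    }

    void dev_unregister(struct dev *d)
    {
        /* analogous to cancel_work_sync(); assumes dev_register() succeeded */
        pthread_join(d->worker, NULL);
        if (!atomic_load(&d->registered))
            return;         /* async probe failed; nothing to tear down */
        /* tear down in reverse order here */
    }
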
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+ const struct ath10k_hif_ops *hif_ops)
+{
+ struct ath10k *ar;
+
+ ar = ath10k_mac_create();
+ if (!ar)
+ return NULL;
+
+ ar->ath_common.priv = ar;
+ ar->ath_common.hw = ar->hw;
+
+ ar->p2p = !!ath10k_p2p;
+ ar->dev = dev;
+
+ ar->hif.priv = hif_priv;
+ ar->hif.ops = hif_ops;
+
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
+
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->vdev_setup_done);
+
+ setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
+
+ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+ if (!ar->workqueue)
+ goto err_wq;
+
+ mutex_init(&ar->conf_mutex);
+ spin_lock_init(&ar->data_lock);
+
+ INIT_LIST_HEAD(&ar->peers);
+ init_waitqueue_head(&ar->peer_mapping_wq);
+
+ init_completion(&ar->offchan_tx_completed);
+ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+ skb_queue_head_init(&ar->offchan_tx_queue);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ INIT_WORK(&ar->register_work, ath10k_core_register_work);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
+ return ar;
+
+err_wq:
+ ath10k_mac_destroy(ar);
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+ flush_workqueue(ar->workqueue);
+ destroy_workqueue(ar->workqueue);
+
+ ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 0e71979d837c..68ceef61933d 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
+ u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
u32 cycle_count;
u32 phy_err_count;
u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
/* PDEV TX stats */
s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
u8 fixed_rate;
u8 fixed_nss;
u8 force_sgi;
+ bool use_cts_prot;
+ int num_legacy_stations;
};
struct ath10k_vif_iter {
@@ -326,6 +335,7 @@ enum ath10k_dev_flags {
/* Indicates that ath10k device is during CAC phase of DFS */
ATH10K_CAC_RUNNING,
ATH10K_FLAG_FIRST_BOOT_DONE,
+ ATH10K_FLAG_CORE_REGISTERED,
};
struct ath10k {
@@ -419,13 +429,24 @@ struct ath10k {
struct cfg80211_chan_def chandef;
int free_vdev_map;
+ bool promisc;
+ bool monitor;
int monitor_vdev_id;
- bool monitor_enabled;
- bool monitor_present;
+ bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
u32 dfs_block_radar_events;
+ /* protected by conf_mutex */
+ bool radar_enabled;
+ int num_started_vdevs;
+
+ /* Protected by conf-mutex */
+ u8 supp_tx_chainmask;
+ u8 supp_rx_chainmask;
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done;
@@ -456,6 +477,7 @@ struct ath10k {
enum ath10k_state state;
+ struct work_struct register_work;
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6debd281350a..1b7ff4ba122c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
u8 *tmp = ev->data;
struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats;
- struct wmi_pdev_stats *ps;
+ struct wmi_pdev_stats_10x *ps;
int i;
spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) {
- ps = (struct wmi_pdev_stats *)tmp;
+ ps = (struct wmi_pdev_stats_10x *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
- tmp += sizeof(struct wmi_pdev_stats);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+ ar->fw_features)) {
+ stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
+ stats->rts_bad = __le32_to_cpu(ps->rts_bad);
+ stats->rts_good = __le32_to_cpu(ps->rts_good);
+ stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
+ stats->no_beacons = __le32_to_cpu(ps->no_beacons);
+ stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
+ tmp += sizeof(struct wmi_pdev_stats_10x);
+ } else {
+ tmp += sizeof(struct wmi_pdev_stats_old);
+ }
}
/* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
if (num_peer_stats) {
- struct wmi_peer_stats *peer_stats;
+ struct wmi_peer_stats_10x *peer_stats;
struct ath10k_peer_stat *s;
stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) {
- peer_stats = (struct wmi_peer_stats *)tmp;
+ peer_stats = (struct wmi_peer_stats_10x *)tmp;
s = &stats->peer_stat[i];
- WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
- s->peer_macaddr);
+ memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
+ ETH_ALEN);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate);
-
- tmp += sizeof(struct wmi_peer_stats);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+ ar->fw_features)) {
+ s->peer_rx_rate =
+ __le32_to_cpu(peer_stats->peer_rx_rate);
+ tmp += sizeof(struct wmi_peer_stats_10x);
+
+ } else {
+ tmp += sizeof(struct wmi_peer_stats_old);
+ }
}
}
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
char *buf = NULL;
- unsigned int len = 0, buf_len = 2500;
+ unsigned int len = 0, buf_len = 8000;
ssize_t ret_cnt = 0;
long left;
int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"Cycle count", fw_stats->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", fw_stats->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", fw_stats->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", fw_stats->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", fw_stats->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", fw_stats->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
- len += scnprintf(buf + len, buf_len - len, "%30s\n",
- "ath10k PEER stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
+ "ath10k PEER stats", fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate",
+ fw_stats->peer_stat[i].peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- const char buf[] = "To simulate firmware crash write the keyword"
- " `crash` to this file.\nThis will force firmware"
- " to report a crash to the host system.\n";
+ const char buf[] = "To simulate firmware crash write one of the"
+ " keywords to this file:\n `soft` - this will send"
+ " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
+ " supports that command.\n `hard` - this will send"
+ " to firmware command with illegal parameters"
+ " causing firmware crash.\n";
+
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
+/* Simulate firmware crash:
+ * 'soft': Call a WMI command that causes a firmware hang. The hang is
+ * recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter on a disallowed
+ * vdev id. This is a hard firmware crash because it is recoverable only by a
+ * cold firmware reset.
+ */
static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32] = {};
+ char buf[32];
int ret;
mutex_lock(&ar->conf_mutex);
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
- if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
- ret = -EINVAL;
- goto exit;
- }
+
+ /* make sure that buf is null terminated */
+ buf[sizeof(buf) - 1] = 0;
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
goto exit;
}
- ath10k_info("simulating firmware crash\n");
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n') {
+ buf[count - 1] = 0;
+ count--;
+ }
- ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
- if (ret)
- ath10k_warn("failed to force fw hang (%d)\n", ret);
+ if (!strcmp(buf, "soft")) {
+ ath10k_info("simulating soft firmware crash\n");
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ } else if (!strcmp(buf, "hard")) {
+ ath10k_info("simulating hard firmware crash\n");
+ ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
+ ar->wmi.vdev_param->rts_threshold, 0);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath10k_warn("failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
- if (ret == 0)
- ret = count;
+ ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
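
The write handler above copies at most sizeof(buf) - 1 bytes, forces null termination, strips a single trailing newline, and only then matches the keyword, so both `echo soft` and `printf soft` work. A self-contained sketch of that parse, using a hypothetical parse_keyword() helper:

    #include <string.h>

    /* returns 1 for "soft", 2 for "hard", -1 otherwise */
    int parse_keyword(const char *user, size_t count)
    {
        char buf[32];

        if (count >= sizeof(buf))       /* copy at most sizeof(buf) - 1 bytes */
            count = sizeof(buf) - 1;
        memcpy(buf, user, count);
        buf[count] = 0;                 /* make sure buf is null terminated */

        if (count && buf[count - 1] == '\n') {  /* drop a trailing newline */
            buf[count - 1] = 0;
            count--;
        }

        if (!strcmp(buf, "soft"))
            return 1;
        if (!strcmp(buf, "hard"))
            return 2;
        return -1;
    }
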
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 7f1bccd3597f..e493db4b4a41 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
goto err_pull;
}
ep->tx_credits -= credits;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
}
@@ -185,6 +188,9 @@ err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
- ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
- report->eid, report->credits);
-
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
+ ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
@@ -824,17 +830,11 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return 0;
}
-/*
- * stop HTC communications, i.e. stop interrupt reception, and flush all
- * queued buffers
- */
void ath10k_htc_stop(struct ath10k_htc *htc)
{
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
-
- ath10k_hif_stop(htc->ar);
}
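
The HTC changes above add debug prints at all three credit transitions: consume before a send, revert after a failed send, and replenish when the target issues a credit report. A tiny model of that accounting, assuming a bare endpoint struct (the real code additionally holds tx_lock around each update):

    #include <stdbool.h>
    #include <stdio.h>

    struct endpoint { int tx_credits; };

    /* take credits before a send; refuse if the pool is too small */
    bool ep_consume(struct endpoint *ep, int credits)
    {
        if (ep->tx_credits < credits)
            return false;
        ep->tx_credits -= credits;
        printf("consumed %d credits (total %d)\n", credits, ep->tx_credits);
        return true;
    }

    /* give credits back if the send failed after they were taken */
    void ep_revert(struct endpoint *ep, int credits)
    {
        ep->tx_credits += credits;
        printf("reverted %d credits back (total %d)\n", credits, ep->tx_credits);
    }

    /* a credit report from the target replenishes the pool */
    void ep_report(struct endpoint *ep, int credits)
    {
        ep->tx_credits += credits;
        printf("got %d credits (total %d)\n", credits, ep->tx_credits);
    }
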
/* registered target arrival callback from the HIF layer */
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 69697af59ce0..19c12cc8d663 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,7 +22,7 @@
#include "core.h"
#include "debug.h"
-static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
+int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
@@ -48,39 +48,14 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_init(struct ath10k *ar)
{
struct ath10k_htt *htt = &ar->htt;
- int ret;
htt->ar = ar;
htt->max_throughput_mbps = 800;
/*
- * Connect to HTC service.
- * This has to be done before calling ath10k_htt_rx_attach,
- * since ath10k_htt_rx_attach involves sending a rx ring configure
- * message to the target.
- */
- ret = ath10k_htt_htc_attach(htt);
- if (ret) {
- ath10k_err("could not attach htt htc (%d)\n", ret);
- goto err_htc_attach;
- }
-
- ret = ath10k_htt_tx_attach(htt);
- if (ret) {
- ath10k_err("could not attach htt tx (%d)\n", ret);
- goto err_htc_attach;
- }
-
- ret = ath10k_htt_rx_attach(htt);
- if (ret) {
- ath10k_err("could not attach htt rx (%d)\n", ret);
- goto err_rx_attach;
- }
-
- /*
* Prefetch enough data to satisfy target
* classification engine.
* This is for LL chips. HL chips will probably
@@ -93,11 +68,6 @@ int ath10k_htt_attach(struct ath10k *ar)
2; /* ip4 dscp or ip6 priority */
return 0;
-
-err_rx_attach:
- ath10k_htt_tx_detach(htt);
-err_htc_attach:
- return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -117,7 +87,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
return 0;
}
-int ath10k_htt_attach_target(struct ath10k_htt *htt)
+int ath10k_htt_setup(struct ath10k_htt *htt)
{
int status;
@@ -140,9 +110,3 @@ int ath10k_htt_attach_target(struct ath10k_htt *htt)
return ath10k_htt_send_rx_ring_cfg_ll(htt);
}
-
-void ath10k_htt_detach(struct ath10k_htt *htt)
-{
- ath10k_htt_rx_detach(htt);
- ath10k_htt_tx_detach(htt);
-}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 654867fc1ae7..9a263462c793 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -21,6 +21,7 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
+#include <net/mac80211.h>
#include "htc.h"
#include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
-struct htt_rx_info {
- struct sk_buff *skb;
- enum htt_rx_mpdu_status status;
- enum htt_rx_mpdu_encrypt_type encrypt_type;
- s8 signal;
- struct {
- u8 info0;
- u32 info1;
- u32 info2;
- } rate;
-
- u32 tsf;
- bool fcs_err;
- bool amsdu_more;
- bool mic_err;
-};
-
struct ath10k_htt_txbuf {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
+
+ /* rx_status template */
+ struct ieee80211_rx_status rx_status;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1341,14 +1328,16 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
-int ath10k_htt_attach(struct ath10k *ar);
-int ath10k_htt_attach_target(struct ath10k_htt *htt);
-void ath10k_htt_detach(struct ath10k_htt *htt);
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
-int ath10k_htt_tx_attach(struct ath10k_htt *htt);
-void ath10k_htt_tx_detach(struct ath10k_htt *htt);
-int ath10k_htt_rx_attach(struct ath10k_htt *htt);
-void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index cdcbe2de95f9..6c102b1312ff 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -225,10 +225,26 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
- int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < htt->rx_ring.size; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ if (!skb)
+ continue;
+ dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ htt->rx_ring.netbufs_ring[i] = NULL;
+ }
+}
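
ath10k_htt_rx_free() no longer chases the sw_rd_idx cursor; because the netbufs ring is now kzalloc'd and ath10k_htt_rx_netbuf_pop() NULLs each slot it consumes, cleanup can simply walk every slot and free whatever is still set. A sketch of that walk, with free() standing in for dma_unmap_single() plus dev_kfree_skb_any():

    #include <stdlib.h>

    struct rx_ring {
        void **netbufs;     /* zero-initialized, so unused slots stay NULL */
        int size;
    };

    /* free every outstanding buffer regardless of ring index state */
    void ring_clean_up(struct rx_ring *r)
    {
        for (int i = 0; i < r->size; i++) {
            if (!r->netbufs[i])
                continue;
            /* free() stands in for dma_unmap_single() + dev_kfree_skb_any() */
            free(r->netbufs[i]);
            r->netbufs[i] = NULL;
        }
    }
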
+
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
tasklet_kill(&htt->rx_replenish_task);
tasklet_kill(&htt->txrx_compl_task);
@@ -236,18 +252,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
- while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
- struct sk_buff *skb =
- htt->rx_ring.netbufs_ring[sw_rd_idx];
- struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-
- dma_unmap_single(htt->ar->dev, cb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
- sw_rd_idx++;
- sw_rd_idx &= htt->rx_ring.size_mask;
- }
+ ath10k_htt_rx_ring_clean_up(htt);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
@@ -277,6 +282,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
+ htt->rx_ring.netbufs_ring[idx] = NULL;
idx++;
idx &= htt->rx_ring.size_mask;
@@ -297,6 +303,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
}
}
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
u8 **fw_desc, int *fw_desc_len,
struct sk_buff **head_msdu,
@@ -305,12 +312,13 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+ bool corrupted = false;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
ath10k_warn("htt is confused. refusing rx\n");
- return 0;
+ return -1;
}
msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -398,7 +406,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc->frag_info.ring2_more_count;
- msdu_chaining = msdu_chained;
if (msdu_len_invalid)
msdu_len = 0;
@@ -426,11 +433,15 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
msdu->next = next;
msdu = next;
+ msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
+ if (msdu_chaining && !last_msdu)
+ corrupted = true;
+
if (last_msdu) {
msdu->next = NULL;
break;
@@ -442,6 +453,23 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
*tail_msdu = msdu;
+ if (*head_msdu == NULL)
+ msdu_chaining = -1;
+
+ /*
+ * Apparently FW sometimes reports weird chained MSDU sequences with
+ * more than one rx descriptor. This seems like a bug but needs more
+ * analysis. For the time being work around it by dropping such sequences to
+ * avoid blowing up the host system.
+ */
+ if (corrupted) {
+ ath10k_warn("failed to pop chained msdus, dropping\n");
+ ath10k_htt_rx_free_msdu_chain(*head_msdu);
+ *head_msdu = NULL;
+ *tail_msdu = NULL;
+ msdu_chaining = -EINVAL;
+ }
+
/*
* Don't refill the ring yet.
*
@@ -464,7 +492,7 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
ath10k_htt_rx_msdu_buff_replenish(htt);
}
-int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
dma_addr_t paddr;
void *vaddr;
@@ -490,7 +518,7 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
htt->rx_ring.netbufs_ring =
- kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+ kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
GFP_KERNEL);
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
@@ -636,6 +664,203 @@ struct amsdu_subframe_hdr {
__be16 len;
} __packed;
+static const u8 rx_legacy_rate_idx[] = {
+ 3, /* 0x00 - 11Mbps */
+ 2, /* 0x01 - 5.5Mbps */
+ 1, /* 0x02 - 2Mbps */
+ 0, /* 0x03 - 1Mbps */
+ 3, /* 0x04 - 11Mbps */
+ 2, /* 0x05 - 5.5Mbps */
+ 1, /* 0x06 - 2Mbps */
+ 0, /* 0x07 - 1Mbps */
+ 10, /* 0x08 - 48Mbps */
+ 8, /* 0x09 - 24Mbps */
+ 6, /* 0x0A - 12Mbps */
+ 4, /* 0x0B - 6Mbps */
+ 11, /* 0x0C - 54Mbps */
+ 9, /* 0x0D - 36Mbps */
+ 7, /* 0x0E - 18Mbps */
+ 5, /* 0x0F - 9Mbps */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ enum ieee80211_band band,
+ u8 info0, u32 info1, u32 info2,
+ struct ieee80211_rx_status *status)
+{
+ u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ u8 preamble = 0;
+
+ /* Check if valid fields */
+ if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+ return;
+
+ preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+ cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+ rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+ rate_idx = 0;
+
+ if (rate < 0x08 || rate > 0x0F)
+ break;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ if (cck)
+ rate &= ~BIT(3);
+ rate_idx = rx_legacy_rate_idx[rate];
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate_idx = rx_legacy_rate_idx[rate];
+ /* We use the same rate table that was
+ registered with the HW - ath10k_rates[]. For
+ 5GHz the CCK rates are skipped, so -4 here */
+ rate_idx -= 4;
+ break;
+ default:
+ break;
+ }
+
+ status->rate_idx = rate_idx;
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info1 and info2 */
+ mcs = info1 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info1 >> 7) & 1;
+ sgi = (info2 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->flag |= RX_FLAG_HT;
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+ if (bw)
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+ /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
+ TODO check this */
+ mcs = (info2 >> 4) & 0x0F;
+ nss = ((info1 >> 10) & 0x07) + 1;
+ bw = info1 & 3;
+ sgi = info2 & 1;
+
+ status->rate_idx = mcs;
+ status->vht_nss = nss;
+
+ if (sgi)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ switch (bw) {
+ /* 20MHZ */
+ case 0:
+ break;
+ /* 40MHZ */
+ case 1:
+ status->flag |= RX_FLAG_40MHZ;
+ break;
+ /* 80MHZ */
+ case 2:
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
+ }
+
+ status->flag |= RX_FLAG_VHT;
+ break;
+ default:
+ break;
+ }
+}
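
For HT preambles the rate fields above are lifted straight out of HT-SIG (IEEE 802.11-2012, Table 20-11): the low five bits of info1 carry the MCS, bit 7 the 40 MHz flag, and bit 7 of info2 the short GI flag. A small decoder sketch of those bit manipulations:

    #include <stdint.h>
    #include <stdio.h>

    /* HT-SIG fields (Table 20-11) packed into info1/info2 */
    void decode_ht_sig(uint32_t info1, uint32_t info2)
    {
        uint8_t mcs = info1 & 0x1F;         /* low five bits: MCS index */
        uint8_t nss = mcs >> 3;             /* 8 MCS values per spatial stream */
        uint8_t bw  = (info1 >> 7) & 1;     /* 1 = 40 MHz */
        uint8_t sgi = (info2 >> 7) & 1;     /* 1 = short guard interval */

        printf("mcs %u streams %u %s sgi %u\n",
               mcs, nss + 1, bw ? "40MHz" : "20MHz", sgi);
    }
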
+
+static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ enum rx_msdu_decap_format fmt,
+ bool dot11frag)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
+ return;
+
+ /*
+ * There's no explicit rx descriptor flag to indicate whether a given
+ * frame has been decrypted or not. We're forced to use the decap
+ * format as an implicit indication. However, fragmented rx is always
+ * raw and it probably never reports undecrypted frames.
+ *
+ * This makes sure sniffed frames are reported as-is without stripping
+ * the protected flag.
+ */
+ if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
+ return;
+
+ rx_status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+ ~IEEE80211_FCTL_PROTECTED);
+}
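
Once a frame is known to be hardware-decrypted, the helper above marks it DECRYPTED/IV_STRIPPED/MMIC_STRIPPED and clears the protected bit in frame_control, which is stored little-endian on the wire. A minimal sketch of that read-modify-write, assuming a little-endian host for the conversion stubs:

    #include <stdint.h>

    #define FCTL_PROTECTED 0x4000   /* IEEE80211_FCTL_PROTECTED */

    /* identity stubs: a little-endian host is assumed here */
    static uint16_t le16_to_cpu(uint16_t v) { return v; }
    static uint16_t cpu_to_le16(uint16_t v) { return v; }

    /* the hw already decrypted and stripped the IV, so the frame must no
     * longer advertise protection */
    void clear_protected(uint16_t *frame_control)
    {
        *frame_control = cpu_to_le16(le16_to_cpu(*frame_control) &
                                     ~FCTL_PROTECTED);
    }
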
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_channel *ch;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch)
+ return false;
+
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ return true;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ ath10k_dbg(ATH10K_DBG_DATA,
+ "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
+ skb,
+ skb->len,
+ status->flag == 0 ? "legacy" : "",
+ status->flag & RX_FLAG_HT ? "ht" : "",
+ status->flag & RX_FLAG_VHT ? "vht" : "",
+ status->flag & RX_FLAG_40MHZ ? "40" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+ status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->vht_nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR));
+ ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
+
+ ieee80211_rx(ar->hw, skb);
+}
+
static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@@ -643,11 +868,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
}
static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
- struct htt_rx_info *info)
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb_in)
{
struct htt_rx_desc *rxd;
+ struct sk_buff *skb = skb_in;
struct sk_buff *first;
- struct sk_buff *skb = info->skb;
enum rx_msdu_decap_format fmt;
enum htt_rx_mpdu_encrypt_type enctype;
struct ieee80211_hdr *hdr;
@@ -728,24 +954,28 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
break;
}
- info->skb = skb;
- info->encrypt_type = enctype;
+ skb_in = skb;
+ ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
+ false);
skb = skb->next;
- info->skb->next = NULL;
+ skb_in->next = NULL;
if (skb)
- info->amsdu_more = true;
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+ else
+ rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
- ath10k_process_rx(htt->ar, info);
+ ath10k_process_rx(htt->ar, rx_status, skb_in);
}
/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
* monitor interface active for sniffing purposes. */
}
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
{
- struct sk_buff *skb = info->skb;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
@@ -808,66 +1038,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
break;
}
- info->skb = skb;
- info->encrypt_type = enctype;
+ ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);
- ath10k_process_rx(htt->ar, info);
-}
-
-static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
- return true;
-
- return false;
-}
-
-static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
-{
- struct htt_rx_desc *rxd;
- u32 flags;
-
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
-
- if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
- return true;
-
- return false;
+ ath10k_process_rx(htt->ar, rx_status, skb);
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1125,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
return 0;
}
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
+ struct sk_buff *head,
+ enum htt_rx_mpdu_status status,
+ bool channel_set,
+ u32 attention)
+{
+ if (head->len == 0) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to zero-len\n");
+ return false;
+ }
+
+ if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx dropping due to decrypt-err\n");
+ return false;
+ }
+
+ if (!channel_set) {
+ ath10k_warn("no channel configured; ignoring frame!\n");
+ return false;
+ }
+
+ /* Skip mgmt frames while we handle this in WMI */
+ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+ attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
+ ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ return false;
+ }
+
+ if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+ status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+ status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
+ !htt->ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx ignoring frame w/ status %d\n",
+ status);
+ return false;
+ }
+
+ if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+ ath10k_dbg(ATH10K_DBG_HTT,
+ "htt rx CAC running\n");
+ return false;
+ }
+
+ return true;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
- struct htt_rx_info info;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct htt_rx_desc *rxd;
+ enum htt_rx_mpdu_status status;
struct ieee80211_hdr *hdr;
int num_mpdu_ranges;
+ u32 attention;
int fw_desc_len;
u8 *fw_desc;
+ bool channel_set;
int i, j;
+ int ret;
lockdep_assert_held(&htt->rx_ring.lock);
- memset(&info, 0, sizeof(info));
-
fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
fw_desc = (u8 *)&rx->fw_desc;
@@ -974,106 +1199,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+ /* Fill this once, while this is per-ppdu */
+ if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rx->ppdu.combined_rssi;
+ }
+
+ if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+ /* TSF available only in 32-bit */
+ rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+ }
+
+ channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
+
+ if (channel_set) {
+ ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
+ rx->ppdu.info0,
+ __le32_to_cpu(rx->ppdu.info1),
+ __le32_to_cpu(rx->ppdu.info2),
+ rx_status);
+ }
+
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++) {
- info.status = mpdu_ranges[i].mpdu_range_status;
+ status = mpdu_ranges[i].mpdu_range_status;
for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
struct sk_buff *msdu_head, *msdu_tail;
- enum htt_rx_mpdu_status status;
- int msdu_chaining;
msdu_head = NULL;
msdu_tail = NULL;
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
- &fw_desc,
- &fw_desc_len,
- &msdu_head,
- &msdu_tail);
-
- if (!msdu_head) {
- ath10k_warn("htt rx no data!\n");
- continue;
- }
-
- if (msdu_head->len == 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to zero-len\n");
+ ret = ath10k_htt_rx_amsdu_pop(htt,
+ &fw_desc,
+ &fw_desc_len,
+ &msdu_head,
+ &msdu_tail);
+
+ if (ret < 0) {
+ ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+ ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx dropping due to decrypt-err\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
+ rxd = container_of((void *)msdu_head->data,
+ struct htt_rx_desc,
+ msdu_payload);
+ attention = __le32_to_cpu(rxd->attention.flags);
- status = info.status;
-
- /* Skip mgmt frames while we handle this in WMI */
- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
- ath10k_htt_rx_is_mgmt(msdu_head)) {
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
+ status,
+ channel_set,
+ attention)) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
- !htt->ar->monitor_enabled) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx ignoring frame w/ status %d\n",
- status);
+ if (ret > 0 &&
+ ath10k_unchain_msdu(msdu_head) < 0) {
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
}
- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx CAC running\n");
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- if (msdu_chaining &&
- (ath10k_unchain_msdu(msdu_head) < 0)) {
- ath10k_htt_rx_free_msdu_chain(msdu_head);
- continue;
- }
-
- info.skb = msdu_head;
- info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
- info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
-
- if (info.fcs_err)
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx has FCS err\n");
-
- if (info.mic_err)
- ath10k_dbg(ATH10K_DBG_HTT,
- "htt rx has MIC err\n");
-
- info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
- info.signal += rx->ppdu.combined_rssi;
+ if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ else
+ rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
- info.rate.info0 = rx->ppdu.info0;
- info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
- info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
- info.tsf = __le32_to_cpu(rx->ppdu.tsf);
+ if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ else
+ rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
if (ath10k_htt_rx_hdr_is_amsdu(hdr))
- ath10k_htt_rx_amsdu(htt, &info);
+ ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
else
- ath10k_htt_rx_msdu(htt, &info);
+ ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
}
}
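
ath10k_htt_rx_amsdu_pop() now uses a three-way return — negative for fatal errors, 0 for a plain MSDU, positive for a chained one — and the handler above drops on error, unchains when positive, and delivers otherwise. A toy caller showing the convention, with stubbed pop/unchain helpers:

    #include <stdio.h>

    /* stubs: pop cycles through -1 (fatal), 0 (plain), 1 (chained) */
    static int amsdu_pop(int i) { return i % 3 - 1; }
    static int unchain(void) { return 0; }

    int main(void)
    {
        for (int i = 0; i < 6; i++) {
            int ret = amsdu_pop(i);

            if (ret < 0) {                      /* fatal: drop the chain */
                printf("msdu %d: dropped (%d)\n", i, ret);
                continue;
            }
            if (ret > 0 && unchain() < 0) {     /* chained: merge or drop */
                printf("msdu %d: unchain failed\n", i);
                continue;
            }
            printf("msdu %d: delivered\n", i);
        }
        return 0;
    }
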
@@ -1084,11 +1293,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
struct sk_buff *msdu_head, *msdu_tail;
+ enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
enum rx_msdu_decap_format fmt;
- struct htt_rx_info info = {};
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct ieee80211_hdr *hdr;
- int msdu_chaining;
+ int ret;
bool tkip_mic_err;
bool decrypt_err;
u8 *fw_desc;
@@ -1102,24 +1312,21 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
msdu_tail = NULL;
spin_lock_bh(&htt->rx_ring.lock);
- msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &msdu_head, &msdu_tail);
+ ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+ &msdu_head, &msdu_tail);
spin_unlock_bh(&htt->rx_ring.lock);
ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
- if (!msdu_head) {
- ath10k_warn("htt rx frag no data\n");
- return;
- }
-
- if (msdu_chaining || msdu_head != msdu_tail) {
- ath10k_warn("aggregation with fragmentation?!\n");
+ if (ret) {
+ ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+ ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
}
/* FIXME: implement signal strength */
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
hdr = (struct ieee80211_hdr *)msdu_head->data;
rxd = (void *)msdu_head->data - sizeof(*rxd);
@@ -1136,57 +1343,55 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
goto end;
}
- info.skb = msdu_head;
- info.status = HTT_RX_IND_MPDU_STATUS_OK;
- info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
- info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
+ true);
+ msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
- if (tkip_mic_err) {
+ if (tkip_mic_err)
ath10k_warn("tkip mic error\n");
- info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
- }
if (decrypt_err) {
ath10k_warn("decryption err in fragmented rx\n");
- dev_kfree_skb_any(info.skb);
+ dev_kfree_skb_any(msdu_head);
goto end;
}
- if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+ if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+ paramlen = ath10k_htt_rx_crypto_param_len(enctype);
/* It is more efficient to move the header than the payload */
- memmove((void *)info.skb->data + paramlen,
- (void *)info.skb->data,
+ memmove((void *)msdu_head->data + paramlen,
+ (void *)msdu_head->data,
hdrlen);
- skb_pull(info.skb, paramlen);
- hdr = (struct ieee80211_hdr *)info.skb->data;
+ skb_pull(msdu_head, paramlen);
+ hdr = (struct ieee80211_hdr *)msdu_head->data;
}
/* remove trailing FCS */
trim = 4;
/* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+ trim += ath10k_htt_rx_crypto_tail_len(enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
- info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
trim += 8;
- if (trim > info.skb->len) {
+ if (trim > msdu_head->len) {
ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
- dev_kfree_skb_any(info.skb);
+ dev_kfree_skb_any(msdu_head);
goto end;
}
- skb_trim(info.skb, info.skb->len - trim);
+ skb_trim(msdu_head, msdu_head->len - trim);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
- info.skb->data, info.skb->len);
- ath10k_process_rx(htt->ar, &info);
+ msdu_head->data, msdu_head->len);
+ ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7a3e2e40dd5c..7064354d1f4f 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -83,7 +83,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
__clear_bit(msdu_id, htt->used_msdu_ids);
}
-int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
@@ -120,7 +120,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
struct htt_tx_done tx_done = {0};
int msdu_id;
@@ -141,9 +141,9 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
spin_unlock_bh(&htt->tx_lock);
}
-void ath10k_htt_tx_detach(struct ath10k_htt *htt)
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
- ath10k_htt_tx_cleanup_pending(htt);
+ ath10k_htt_tx_free_pending(htt);
kfree(htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool);
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 35fc44e281f5..007e855f4ba9 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -28,6 +28,7 @@
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
+#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 511a2f81e7af..a21080028c54 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -54,7 +54,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ else
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_TKIP:
arg.key_cipher = WMI_CIPHER_TKIP;
@@ -165,7 +168,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
first_errno = ret;
if (ret)
- ath10k_warn("could not remove peer wep key %d (%d)\n",
+ ath10k_warn("failed to remove peer wep key %d: %d\n",
i, ret);
peer->keys[i] = NULL;
@@ -213,7 +216,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
first_errno = ret;
if (ret)
- ath10k_warn("could not remove key for %pM\n", addr);
+ ath10k_warn("failed to remove key for %pM: %d\n",
+ addr, ret);
}
return first_errno;
@@ -323,14 +327,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
@@ -351,7 +355,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_pdev_set_param(ar, param,
ATH10K_KICKOUT_THRESHOLD);
if (ret) {
- ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+ ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -360,7 +364,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MIN_IDLE);
if (ret) {
- ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+ ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -369,7 +373,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_IDLE);
if (ret) {
- ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+ ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -378,7 +382,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
- ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -488,92 +492,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return 0;
}
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static bool ath10k_monitor_is_enabled(struct ath10k *ar)
{
- struct ath10k *ar = arvif->ar;
- struct cfg80211_chan_def *chandef = &ar->chandef;
- struct wmi_vdev_start_request_arg arg = {};
- int ret = 0;
-
lockdep_assert_held(&ar->conf_mutex);
- reinit_completion(&ar->vdev_setup_done);
-
- arg.vdev_id = arvif->vdev_id;
- arg.dtim_period = arvif->dtim_period;
- arg.bcn_intval = arvif->beacon_interval;
-
- arg.channel.freq = chandef->chan->center_freq;
- arg.channel.band_center_freq1 = chandef->center_freq1;
- arg.channel.mode = chan_to_phymode(chandef);
-
- arg.channel.min_power = 0;
- arg.channel.max_power = chandef->chan->max_power * 2;
- arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- arg.ssid = arvif->u.ap.ssid;
- arg.ssid_len = arvif->u.ap.ssid_len;
- arg.hidden_ssid = arvif->u.ap.hidden_ssid;
-
- /* For now allow DFS for AP mode */
- arg.channel.chan_radar =
- !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
- } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- arg.ssid = arvif->vif->bss_conf.ssid;
- arg.ssid_len = arvif->vif->bss_conf.ssid_len;
- }
-
ath10k_dbg(ATH10K_DBG_MAC,
- "mac vdev %d start center_freq %d phymode %s\n",
- arg.vdev_id, arg.channel.freq,
- ath10k_wmi_phymode_str(arg.channel.mode));
+ "mac monitor refs: promisc %d monitor %d cac %d\n",
+ ar->promisc, ar->monitor,
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
- ret = ath10k_wmi_vdev_start(ar, &arg);
- if (ret) {
- ath10k_warn("WMI vdev %i start failed: ret %d\n",
- arg.vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn("vdev %i setup failed %d\n",
- arg.vdev_id, ret);
- return ret;
- }
-
- return ret;
+ return ar->promisc || ar->monitor ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
-{
- struct ath10k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn("WMI vdev %i stop failed: ret %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn("vdev %i setup sync failed %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- return ret;
-}
-
-static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = &ar->chandef;
struct ieee80211_channel *channel = chandef->chan;
@@ -582,11 +514,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present) {
- ath10k_warn("mac montor stop -- monitor is not present\n");
- return -EINVAL;
- }
-
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +531,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+ ath10k_warn("failed to request monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("Monitor vdev %i setup failed %d\n",
+ ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
- ath10k_warn("Monitor vdev %i up failed: %d\n",
+ ath10k_warn("failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
goto vdev_stop;
}
ar->monitor_vdev_id = vdev_id;
- ar->monitor_enabled = true;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ar->monitor_vdev_id);
return 0;
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i stop failed: %d\n",
+ ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
-static int ath10k_monitor_stop(struct ath10k *ar)
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present) {
- ath10k_warn("mac montor stop -- monitor is not present\n");
- return -EINVAL;
- }
-
- if (!ar->monitor_enabled) {
- ath10k_warn("mac montor stop -- monitor is not enabled\n");
- return -EINVAL;
- }
-
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i down failed: %d\n",
+ ath10k_warn("failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("Monitor vdev %i stop failed: %d\n",
+		ath10k_warn("failed to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+ ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
- ar->monitor_enabled = false;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
return ret;
}
-static int ath10k_monitor_create(struct ath10k *ar)
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
int bit, ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (ar->monitor_present) {
- ath10k_warn("Monitor mode already enabled\n");
- return 0;
- }
-
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
- ath10k_warn("No free VDEV slots\n");
+ ath10k_warn("failed to find free vdev id for monitor vdev\n");
return -ENOMEM;
}
@@ -696,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
- ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+ ath10k_warn("failed to request monitor vdev %i creation: %d\n",
ar->monitor_vdev_id, ret);
goto vdev_fail;
}
@@ -704,7 +618,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
- ar->monitor_present = true;
return 0;
vdev_fail:
@@ -715,48 +628,123 @@ vdev_fail:
return ret;
}
-static int ath10k_monitor_destroy(struct ath10k *ar)
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
- if (!ar->monitor_present)
- return 0;
-
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+		ath10k_warn("failed to request WMI monitor vdev %i removal: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
- ar->monitor_present = false;
ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
-static int ath10k_start_cac(struct ath10k *ar)
+static int ath10k_monitor_start(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
- set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
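+	/* callers hold references via the promisc, monitor and CAC flags */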
+ if (!ath10k_monitor_is_enabled(ar)) {
+ ath10k_warn("trying to start monitor with no references\n");
+ return 0;
+ }
+
+ if (ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+ return 0;
+ }
- ret = ath10k_monitor_create(ar);
+ ret = ath10k_monitor_vdev_create(ar);
if (ret) {
- clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_warn("failed to create monitor vdev: %d\n", ret);
return ret;
}
- ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn("failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+}
+
+static void ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath10k_monitor_is_enabled(ar)) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac monitor will be stopped later\n");
+ return;
+ }
+
+ if (!ar->monitor_started) {
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac monitor probably failed to start earlier\n");
+ return;
+ }
+
+ ret = ath10k_monitor_vdev_stop(ar);
+ if (ret)
+ ath10k_warn("failed to stop monitor vdev: %d\n", ret);
+
+ ret = ath10k_monitor_vdev_delete(ar);
+ if (ret)
+ ath10k_warn("failed to delete monitor vdev: %d\n", ret);
+
+ ar->monitor_started = false;
+ ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
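+	/* CTS protection or any legacy (non-WMM) station requires RTS/CTS */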
+ if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+ WMI_RTSCTS_PROFILE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_start(ar);
if (ret) {
+ ath10k_warn("failed to start monitor (cac): %d\n", ret);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
- ath10k_monitor_destroy(ar);
return ret;
}
@@ -774,58 +762,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
return 0;
- ath10k_monitor_stop(ar);
- ath10k_monitor_destroy(ar);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
return 0;
}
-static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
- switch (dfs_state) {
- case NL80211_DFS_USABLE:
- return "USABLE";
- case NL80211_DFS_UNAVAILABLE:
- return "UNAVAILABLE";
- case NL80211_DFS_AVAILABLE:
- return "AVAILABLE";
- default:
- WARN_ON(1);
- return "bug";
- }
-}
-
-static void ath10k_config_radar_detection(struct ath10k *ar)
-{
- struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
- bool radar = ar->hw->conf.radar_enabled;
- bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
- enum nl80211_dfs_state dfs_state = chan->dfs_state;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC,
- "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
- chan->center_freq, radar, chan_radar,
- ath10k_dfs_state(dfs_state));
-
- /*
- * It's safe to call it even if CAC is not started.
- * This call here guarantees changing channel, etc. will stop CAC.
- */
ath10k_stop_cac(ar);
- if (!radar)
- return;
-
- if (!chan_radar)
+ if (!ar->radar_enabled)
return;
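+	/* there is no need for CAC while vdevs are already running */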
- if (dfs_state != NL80211_DFS_USABLE)
+ if (ar->num_started_vdevs > 0)
return;
ret = ath10k_start_cac(ar);
@@ -835,11 +791,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
* radiation is not allowed, make this channel DFS_UNAVAILABLE
* by indicating that radar was detected.
*/
- ath10k_warn("failed to start CAC (%d)\n", ret);
+ ath10k_warn("failed to start CAC: %d\n", ret);
ieee80211_radar_detected(ar->hw);
}
}
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.mode = chan_to_phymode(chandef);
+
+ arg.channel.min_power = 0;
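+	/* firmware expects channel power values in 0.5 dBm units */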
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->bss_conf.ssid;
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+ ath10k_dbg(ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn("failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
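+	/* starting a vdev may render a pending CAC unnecessary */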
+ ar->num_started_vdevs++;
+ ath10k_recalc_radar_detection(ar);
+
+ return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn("failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+		ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
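+	/* guard against counter underflow on unbalanced stop calls */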
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -880,7 +931,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("Failed to bring up vdev %d: %i\n",
+ ath10k_warn("failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
ath10k_vdev_stop(arvif);
return;
@@ -904,7 +955,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (!info->ibss_joined) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
if (ret)
- ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +964,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
arvif->bssid);
if (ret) {
- ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
return;
}
@@ -925,7 +976,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
if (ret) {
- ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
return;
}
@@ -934,7 +985,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
- ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+ ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
arvif->vdev_id, ret);
}
@@ -961,7 +1012,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
- ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+ ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
@@ -974,8 +1025,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
if (ret) {
- ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
- psmode, arvif->vdev_id);
+ ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
return ret;
}
@@ -1429,7 +1480,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
- ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+ ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
@@ -1442,7 +1493,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+ ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
@@ -1452,7 +1503,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+ ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
@@ -1473,7 +1524,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
- ath10k_warn("VDEV: %d up failed: ret %d\n",
+ ath10k_warn("failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
return;
}
@@ -1524,7 +1575,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool reassoc)
{
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
@@ -1533,34 +1584,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
- ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+ ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
+ peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+ ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
if (ret) {
- ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
+ ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
return ret;
}
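+	/* non-WMM stations are treated as legacy and force RTS/CTS protection */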
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+ ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
- ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+ ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
@@ -1575,9 +1638,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
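+	/* removing a legacy station may allow RTS/CTS protection to be relaxed */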
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+ ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
@@ -1685,19 +1758,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return ret;
}
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ return WMI_UNINIT_DFS_DOMAIN;
+ case NL80211_DFS_FCC:
+ return WMI_FCC_DFS_DOMAIN;
+ case NL80211_DFS_ETSI:
+ return WMI_ETSI_DFS_DOMAIN;
+ case NL80211_DFS_JP:
+ return WMI_MKK4_DFS_DOMAIN;
+ }
+ return WMI_UNINIT_DFS_DOMAIN;
+}
+
static void ath10k_regd_update(struct ath10k *ar)
{
struct reg_dmn_pair_mapping *regpair;
int ret;
+ enum wmi_dfs_region wmi_dfs_reg;
+ enum nl80211_dfs_regions nl_dfs_reg;
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_update_channel_list(ar);
if (ret)
- ath10k_warn("could not update channel list (%d)\n", ret);
+ ath10k_warn("failed to update channel list: %d\n", ret);
regpair = ar->ath_common.regulatory.regpair;
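+	/* report the DFS region to firmware only with certified DFS support */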
+ if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ nl_dfs_reg = ar->dfs_detector->region;
+ wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+ } else {
+ wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+ }
+
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1803,10 @@ static void ath10k_regd_update(struct ath10k *ar)
regpair->reg_domain, /* 2ghz */
regpair->reg_domain, /* 5ghz */
regpair->reg_2ghz_ctl,
- regpair->reg_5ghz_ctl);
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
if (ret)
- ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+ ath10k_warn("failed to set pdev regdomain: %d\n", ret);
}
static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1824,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
request->dfs_region);
if (!result)
- ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+ ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
request->dfs_region);
}
@@ -1759,10 +1858,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
if (info->control.vif)
return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
- if (ar->monitor_enabled)
+ if (ar->monitor_started)
return ar->monitor_vdev_id;
- ath10k_warn("could not resolve vdev id\n");
+ ath10k_warn("failed to resolve vdev id\n");
return 0;
}
@@ -1792,8 +1891,13 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
wep_key_work);
int ret, keyidx = arvif->def_wep_key_newidx;
+ mutex_lock(&arvif->ar->conf_mutex);
+
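+	/* the work may race with device stop, so bail out unless powered on */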
+ if (arvif->ar->state != ATH10K_STATE_ON)
+ goto unlock;
+
if (arvif->def_wep_key_idx == keyidx)
- return;
+ goto unlock;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
arvif->vdev_id, keyidx);
@@ -1803,11 +1907,16 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
arvif->ar->wmi.vdev_param->def_keyid,
keyidx);
if (ret) {
- ath10k_warn("could not update wep keyidx (%d)\n", ret);
- return;
+ ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
+ goto unlock;
}
arvif->def_wep_key_idx = keyidx;
+
+unlock:
+ mutex_unlock(&arvif->ar->conf_mutex);
}
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1879,7 +1988,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
ar->fw_features)) {
if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn("wmi mgmt_tx queue limit reached\n");
+			ath10k_warn("reached WMI management transmit queue limit\n");
ret = -EBUSY;
goto exit;
}
@@ -1903,7 +2012,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
exit:
if (ret) {
- ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+ ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
@@ -1964,7 +2073,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (!peer) {
ret = ath10k_peer_create(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+ ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
@@ -1984,7 +2093,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
if (!peer) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+ ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
@@ -2018,7 +2127,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
ret = ath10k_wmi_mgmt_tx(ar, skb);
if (ret) {
- ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+ ath10k_warn("failed to transmit management frame via WMI: %d\n",
+ ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
@@ -2043,7 +2153,7 @@ void ath10k_reset_scan(unsigned long ptr)
return;
}
- ath10k_warn("scan timeout. resetting. fw issue?\n");
+ ath10k_warn("scan timed out, firmware problem?\n");
if (ar->scan.is_roc)
ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2189,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
ret = ath10k_wmi_stop_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+ ath10k_warn("failed to stop wmi scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2209,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
if (ar->scan.in_progress) {
- ath10k_warn("could not stop scan. its still in progress\n");
+ ath10k_warn("failed to stop scan, it's still in progress\n");
ar->scan.in_progress = false;
ath10k_offchan_tx_purge(ar);
ret = -ETIMEDOUT;
@@ -2187,72 +2297,171 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_htt(ar, skb);
}
-/*
- * Initialize various parameters with default vaules.
- */
+/* Must not be called with conf_mutex held as workers also acquire it. */
+static void ath10k_drain_tx(struct ath10k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
+ cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
+
void ath10k_halt(struct ath10k *ar)
{
+ struct ath10k_vif *arvif;
+
lockdep_assert_held(&ar->conf_mutex);
- ath10k_stop_cac(ar);
+ if (ath10k_monitor_is_enabled(ar)) {
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ar->promisc = false;
+ ar->monitor = false;
+ ath10k_monitor_stop(ar);
+ }
+
del_timer_sync(&ar->scan.timeout);
- ath10k_offchan_tx_purge(ar);
- ath10k_mgmt_over_wmi_tx_purge(ar);
+ ath10k_reset_scan((unsigned long)ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
- if (ar->scan.in_progress) {
- del_timer(&ar->scan.timeout);
- ar->scan.in_progress = false;
- ieee80211_scan_completed(ar->hw, true);
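+	/* unmap and free any beacons still held so DMA mappings are not leaked */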
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->beacon)
+ continue;
+
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
}
spin_unlock_bh(&ar->data_lock);
}
+static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
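+	/* report configured chainmasks if set, otherwise the supported ones */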
+ if (ar->cfg_tx_chainmask) {
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+ } else {
+ *tx_ant = ar->supp_tx_chainmask;
+ *rx_ant = ar->supp_rx_chainmask;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
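+	/* if the device is down the masks are only cached; they are applied on start */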
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED))
+ return 0;
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+ tx_ant);
+ if (ret) {
+ ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+ rx_ant);
+ if (ret) {
+ ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
int ret = 0;
+ /*
+ * This makes sense only when restarting hw. It is harmless to call
+	 * unconditionally. This is necessary to make sure no HTT/WMI tx
+ * commands will be submitted while restarting.
+ */
+ ath10k_drain_tx(ar);
+
mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH10K_STATE_OFF &&
- ar->state != ATH10K_STATE_RESTARTING) {
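+	/* only OFF -> ON and RESTARTING -> RESTARTED are valid transitions */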
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ar->state = ATH10K_STATE_ON;
+ break;
+ case ATH10K_STATE_RESTARTING:
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_RESTARTED;
+ break;
+ case ATH10K_STATE_ON:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_WEDGED:
+ WARN_ON(1);
ret = -EINVAL;
- goto exit;
+ goto err;
}
ret = ath10k_hif_power_up(ar);
if (ret) {
- ath10k_err("could not init hif (%d)\n", ret);
- ar->state = ATH10K_STATE_OFF;
- goto exit;
+		ath10k_err("failed to init hif: %d\n", ret);
+ goto err_off;
}
ret = ath10k_core_start(ar);
if (ret) {
- ath10k_err("could not init core (%d)\n", ret);
- ath10k_hif_power_down(ar);
- ar->state = ATH10K_STATE_OFF;
- goto exit;
+		ath10k_err("failed to init core: %d\n", ret);
+ goto err_power_down;
}
- if (ar->state == ATH10K_STATE_OFF)
- ar->state = ATH10K_STATE_ON;
- else if (ar->state == ATH10K_STATE_RESTARTING)
- ar->state = ATH10K_STATE_RESTARTED;
-
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
- if (ret)
- ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
- ret);
+ if (ret) {
+ ath10k_warn("failed to enable PMF QOS: %d\n", ret);
+ goto err_core_stop;
+ }
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
- if (ret)
- ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
- ret);
+ if (ret) {
+ ath10k_warn("failed to enable dynamic BW: %d\n", ret);
+ goto err_core_stop;
+ }
+
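+	/* reapply any antenna setup configured while the device was down */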
+ if (ar->cfg_tx_chainmask)
+ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
+ ar->cfg_rx_chainmask);
/*
* By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2266,15 +2475,27 @@ static int ath10k_start(struct ieee80211_hw *hw)
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->arp_ac_override, 0);
if (ret) {
- ath10k_warn("could not set arp ac override parameter: %d\n",
+ ath10k_warn("failed to set arp ac override parameter: %d\n",
ret);
- goto exit;
+ goto err_core_stop;
}
+ ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
- ret = 0;
-exit:
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_core_stop:
+ ath10k_core_stop(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_off:
+ ar->state = ATH10K_STATE_OFF;
+
+err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -2283,19 +2504,15 @@ static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ ath10k_drain_tx(ar);
+
mutex_lock(&ar->conf_mutex);
- if (ar->state == ATH10K_STATE_ON ||
- ar->state == ATH10K_STATE_RESTARTED ||
- ar->state == ATH10K_STATE_WEDGED)
+ if (ar->state != ATH10K_STATE_OFF) {
ath10k_halt(ar);
-
- ar->state = ATH10K_STATE_OFF;
+ ar->state = ATH10K_STATE_OFF;
+ }
mutex_unlock(&ar->conf_mutex);
- ath10k_mgmt_over_wmi_tx_purge(ar);
-
- cancel_work_sync(&ar->offchan_tx_work);
- cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
@@ -2309,7 +2526,7 @@ static int ath10k_config_ps(struct ath10k *ar)
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_mac_vif_setup_ps(arvif);
if (ret) {
- ath10k_warn("could not setup powersave (%d)\n", ret);
+ ath10k_warn("failed to setup powersave: %d\n", ret);
break;
}
}
@@ -2343,7 +2560,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
static void ath10k_config_chan(struct ath10k *ar)
{
struct ath10k_vif *arvif;
- bool monitor_was_enabled;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2573,8 @@ static void ath10k_config_chan(struct ath10k *ar)
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface. */
- monitor_was_enabled = ar->monitor_enabled;
-
- if (ar->monitor_enabled)
- ath10k_monitor_stop(ar);
+ if (ar->monitor_started)
+ ath10k_monitor_vdev_stop(ar);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->is_started)
@@ -2371,7 +2585,7 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_vdev_stop(arvif);
if (ret) {
- ath10k_warn("could not stop vdev %d (%d)\n",
+ ath10k_warn("failed to stop vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@@ -2388,7 +2602,7 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_vdev_start(arvif);
if (ret) {
- ath10k_warn("could not start vdev %d (%d)\n",
+ ath10k_warn("failed to start vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
@@ -2399,14 +2613,14 @@ static void ath10k_config_chan(struct ath10k *ar)
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("could not bring vdev up %d (%d)\n",
+ ath10k_warn("failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
}
- if (monitor_was_enabled)
- ath10k_monitor_start(ar, ar->monitor_vdev_id);
+ if (ath10k_monitor_is_enabled(ar))
+ ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2634,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ath10k_dbg(ATH10K_DBG_MAC,
- "mac config channel %d mhz flags 0x%x\n",
+ "mac config channel %dMHz flags 0x%x radar %d\n",
conf->chandef.chan->center_freq,
- conf->chandef.chan->flags);
+ conf->chandef.chan->flags,
+ conf->radar_enabled);
spin_lock_bh(&ar->data_lock);
ar->rx_channel = conf->chandef.chan;
spin_unlock_bh(&ar->data_lock);
- ath10k_config_radar_detection(ar);
+ ar->radar_enabled = conf->radar_enabled;
+ ath10k_recalc_radar_detection(ar);
if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
ar->chandef = conf->chandef;
@@ -2444,14 +2660,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+ ath10k_warn("failed to set 2g txpower %d: %d\n",
hw->conf.power_level, ret);
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+ ath10k_warn("failed to set 5g txpower %d: %d\n",
hw->conf.power_level, ret);
}
@@ -2459,10 +2675,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ath10k_config_ps(ar);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR)
- ret = ath10k_monitor_create(ar);
- else
- ret = ath10k_monitor_destroy(ar);
+ if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
+ ar->monitor = true;
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (config): %d\n",
+ ret);
+ ar->monitor = false;
+ }
+ } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
+ ar->monitor) {
+ ar->monitor = false;
+ ath10k_monitor_stop(ar);
+ }
}
mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2722,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
INIT_LIST_HEAD(&arvif->list);
- if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
- ath10k_warn("Only one monitor interface allowed\n");
- ret = -EBUSY;
- goto err;
- }
-
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
@@ -2545,7 +2764,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
- ath10k_warn("WMI vdev %i create failed: ret %d\n",
+ ath10k_warn("failed to create WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
@@ -2557,7 +2776,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
- ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+ ath10k_warn("failed to set vdev %i default key id: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2567,7 +2786,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+ ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
@@ -2575,14 +2794,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
- ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn("failed to create vdev %i peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
- ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+ ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2594,7 +2813,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+ ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2604,7 +2823,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+ ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2614,7 +2833,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+ ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
@@ -2622,21 +2841,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
- ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
if (ret) {
- ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ar->monitor_present = true;
-
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2668,6 +2884,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
+ dma_unmap_single(arvif->ar->dev,
+ ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
@@ -2679,7 +2898,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
- ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+ ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2909,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
- ath10k_warn("WMI vdev %i delete failed: %d\n",
+ ath10k_warn("failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ar->monitor_present = false;
-
ath10k_peer_cleanup(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2944,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
- /* Monitor must not be started if it wasn't created first.
- * Promiscuous mode may be started on a non-monitor interface - in
- * such case the monitor vdev is not created so starting the
- * monitor makes no sense. Since ath10k uses no special RX filters
- * (only BSS filter in STA mode) there's no need for any special
- * action here. */
- if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- !ar->monitor_enabled && ar->monitor_present) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
- ar->monitor_vdev_id);
-
- ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
- if (ret)
- ath10k_warn("Unable to start monitor mode\n");
- } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
- ar->monitor_enabled && ar->monitor_present) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
- ar->monitor_vdev_id);
-
- ret = ath10k_monitor_stop(ar);
- if (ret)
- ath10k_warn("Unable to stop monitor mode\n");
+ if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
+ ar->promisc = true;
+ ret = ath10k_monitor_start(ar);
+ if (ret) {
+ ath10k_warn("failed to start monitor (promisc): %d\n",
+ ret);
+ ar->promisc = false;
+ }
+ } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
+ ar->promisc = false;
+ ath10k_monitor_stop(ar);
}
mutex_unlock(&ar->conf_mutex);
@@ -2780,7 +2985,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, arvif->beacon_interval);
if (ret)
- ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+ ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2793,7 +2998,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
- ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+ ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2808,7 +3013,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
- ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+ ath10k_warn("failed to set dtim period for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2820,7 +3025,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
- if (changed & BSS_CHANGED_BSSID) {
+ /*
+ * Firmware manages AP self-peer internally so make sure to not create
+ * it in driver. Otherwise AP self-peer deletion may timeout later.
+ */
+ if (changed & BSS_CHANGED_BSSID &&
+ vif->type != NL80211_IFTYPE_AP) {
if (!is_zero_ether_addr(info->bssid)) {
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
@@ -2829,7 +3039,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
+ ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +3078,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_control_beaconing(arvif, info);
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- u32 cts_prot;
- if (info->use_cts_prot)
- cts_prot = 1;
- else
- cts_prot = 0;
-
+ arvif->use_cts_prot = info->use_cts_prot;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
- arvif->vdev_id, cts_prot);
+ arvif->vdev_id, info->use_cts_prot);
- vdev_param = ar->wmi.vdev_param->enable_rtscts;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- cts_prot);
+ ret = ath10k_recalc_rtscts_prot(arvif);
if (ret)
- ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+ ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
}
@@ -2900,7 +3103,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
- ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+ ath10k_warn("failed to set erp slot for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2919,7 +3122,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
- ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+ ath10k_warn("failed to set preamble for vdev %d: %i\n",
arvif->vdev_id, ret);
}
@@ -2990,7 +3193,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not start hw scan (%d)\n", ret);
+ ath10k_warn("failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3213,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ret = ath10k_abort_scan(ar);
if (ret) {
- ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
- ret);
+ ath10k_warn("failed to abort scan: %d\n", ret);
ieee80211_scan_completed(hw, 1 /* aborted */);
}
mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3291,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!peer) {
if (cmd == SET_KEY) {
- ath10k_warn("cannot install key for non-existent peer %pM\n",
+ ath10k_warn("failed to install key for non-existent peer %pM\n",
peer_addr);
ret = -EOPNOTSUPP;
goto exit;
@@ -3112,7 +3314,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
- ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+ ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
@@ -3127,7 +3329,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
peer->keys[key->keyidx] = NULL;
else if (peer == NULL)
/* impossible unless FW goes crazy */
- ath10k_warn("peer %pM disappeared!\n", peer_addr);
+		ath10k_warn("peer %pM disappeared!\n", peer_addr);
spin_unlock_bh(&ar->data_lock);
exit:
@@ -3195,6 +3397,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ sta->addr);
+
+ err = ath10k_station_assoc(ar, arvif, sta, true);
+ if (err)
+			ath10k_warn("failed to reassociate station %pM: %d\n",
+				    sta->addr, err);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3236,7 +3448,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
max_num_peers = TARGET_NUM_PEERS;
if (ar->num_peers >= max_num_peers) {
- ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+ ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
ar->num_peers, max_num_peers);
ret = -ENOBUFS;
goto exit;
@@ -3248,7 +3460,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3472,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+ ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3487,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
- ret = ath10k_station_assoc(ar, arvif, sta);
+ ret = ath10k_station_assoc(ar, arvif, sta, false);
if (ret)
- ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+ ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3503,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
- ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+ ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
}
exit:
@@ -3339,7 +3551,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
WMI_STA_PS_PARAM_UAPSD,
arvif->u.sta.uapsd);
if (ret) {
- ath10k_warn("could not set uapsd params %d\n", ret);
+ ath10k_warn("failed to set uapsd params: %d\n", ret);
goto exit;
}
@@ -3352,7 +3564,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
WMI_STA_PS_PARAM_RX_WAKE_POLICY,
value);
if (ret)
- ath10k_warn("could not set rx wake param %d\n", ret);
+ ath10k_warn("failed to set rx wake param: %d\n", ret);
exit:
return ret;
@@ -3402,13 +3614,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
/* FIXME: FW accepts wmm params per hw, not per vif */
ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
if (ret) {
- ath10k_warn("could not set wmm params %d\n", ret);
+ ath10k_warn("failed to set wmm params: %d\n", ret);
goto exit;
}
ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
- ath10k_warn("could not set sta uapsd %d\n", ret);
+ ath10k_warn("failed to set sta uapsd: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3673,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("could not start roc scan (%d)\n", ret);
+ ath10k_warn("failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.in_progress = false;
spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3682,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
if (ret == 0) {
- ath10k_warn("could not switch to channel for roc scan\n");
+ ath10k_warn("failed to switch to channel for roc scan\n");
ath10k_abort_scan(ar);
ret = -ETIMEDOUT;
goto exit;
@@ -3511,7 +3723,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
@@ -3534,7 +3746,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+ ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
@@ -3544,7 +3756,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
bool skip;
@@ -3573,7 +3786,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
- ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+ ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
skip, ar->state, ret);
skip:
@@ -3608,7 +3821,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
ret = ath10k_hif_suspend(ar);
if (ret) {
- ath10k_warn("could not suspend hif (%d)\n", ret);
+ ath10k_warn("failed to suspend hif: %d\n", ret);
goto resume;
}
@@ -3617,7 +3830,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
- ath10k_warn("could not resume target (%d)\n", ret);
+ ath10k_warn("failed to resume target: %d\n", ret);
ret = 1;
exit:
@@ -3634,14 +3847,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
ret = ath10k_hif_resume(ar);
if (ret) {
- ath10k_warn("could not resume hif (%d)\n", ret);
+ ath10k_warn("failed to resume hif: %d\n", ret);
ret = 1;
goto exit;
}
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
- ath10k_warn("could not resume target (%d)\n", ret);
+ ath10k_warn("failed to resume target: %d\n", ret);
ret = 1;
goto exit;
}
@@ -3964,7 +4177,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
if (ret) {
- ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+ ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
fixed_rate, ret);
ret = -EINVAL;
goto exit;
@@ -3977,7 +4190,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
vdev_param, fixed_nss);
if (ret) {
- ath10k_warn("Could not set fixed_nss param %d: %d\n",
+ ath10k_warn("failed to set fixed nss param %d: %d\n",
fixed_nss, ret);
ret = -EINVAL;
goto exit;
@@ -3990,7 +4203,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
force_sgi);
if (ret) {
- ath10k_warn("Could not set sgi param %d: %d\n",
+ ath10k_warn("failed to set sgi param %d: %d\n",
force_sgi, ret);
ret = -EINVAL;
goto exit;
@@ -4026,7 +4239,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
}
if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn("Could not force SGI usage for default rate settings\n");
+ ath10k_warn("failed to force SGI usage for default rate settings\n");
return -EINVAL;
}
@@ -4034,14 +4247,6 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
fixed_nss, force_sgi);
}
-static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_chan_def *chandef)
-{
- /* there's no need to do anything here. vif->csa_active is enough */
- return;
-}
-
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4072,8 +4277,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
- ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
- sta->addr, sta->bandwidth);
+		ath10k_warn("invalid bandwidth %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
}
@@ -4099,8 +4304,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
case IEEE80211_SMPS_NUM_MODES:
- ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
- sta->addr, sta->smps_mode);
+		ath10k_warn("invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4108,15 +4313,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
arsta->smps = smps;
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- /* FIXME: Not implemented. Probably the only way to do it would
- * be to re-assoc the peer. */
- changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
- ath10k_dbg(ATH10K_DBG_MAC,
- "mac sta rc update for %pM: changing supported rates not implemented\n",
- sta->addr);
- }
-
arsta->changed |= changed;
spin_unlock_bh(&ar->data_lock);
@@ -4154,10 +4350,11 @@ static const struct ieee80211_ops ath10k_ops = {
.set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
+ .set_antenna = ath10k_set_antenna,
+ .get_antenna = ath10k_get_antenna,
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
.set_bitrate_mask = ath10k_set_bitrate_mask,
- .channel_switch_beacon = ath10k_channel_switch_beacon,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
#ifdef CONFIG_PM
@@ -4503,6 +4700,18 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+		/* TODO: Have to deal with 2x2 chips if/when they come out. */
+ ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
+ ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
+ } else {
+ ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
+ ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
+ }
+
+ ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
+ ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
+
if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
ar->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4516,7 +4725,6 @@ int ath10k_mac_register(struct ath10k *ar)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
- IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT;
@@ -4570,19 +4778,19 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_DFS_UNSET);
if (!ar->dfs_detector)
- ath10k_warn("dfs pattern detector init failed\n");
+ ath10k_warn("failed to initialise DFS pattern detector\n");
}
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
- ath10k_err("Regulatory initialization failed: %i\n", ret);
+ ath10k_err("failed to initialise regulatory: %i\n", ret);
goto err_free;
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
- ath10k_err("ieee80211 registration failed: %d\n", ret);
+ ath10k_err("failed to register ieee80211: %d\n", ret);
goto err_free;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9d242d801d9d..d0004d59c97e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -39,15 +39,28 @@ enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_MSI = 2,
};
-static unsigned int ath10k_target_ps;
+enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_AUTO = 0,
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
#define QCA988X_2_0_DEVICE_ID (0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -346,9 +359,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
- data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
- orig_nbytes,
- &ce_data_base);
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
@@ -442,12 +456,12 @@ done:
__le32_to_cpu(((__le32 *)data_buf)[i]);
}
} else
- ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
- __func__, address);
+ ath10k_warn("failed to read diag value at 0x%x: %d\n",
+ address, ret);
if (data_buf)
- pci_free_consistent(ar_pci->pdev, orig_nbytes,
- data_buf, ce_data_base);
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
return ret;
}
@@ -490,9 +504,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
- data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
- orig_nbytes,
- &ce_data_base);
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ orig_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
if (!data_buf) {
ret = -ENOMEM;
goto done;
@@ -588,13 +603,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
done:
if (data_buf) {
- pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
- ce_data_base);
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
}
if (ret != 0)
- ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
- address);
+ ath10k_warn("failed to write diag value at 0x%x: %d\n",
+ address, ret);
return ret;
}
@@ -747,17 +762,21 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
- unsigned int nentries_mask = src_ring->nentries_mask;
- unsigned int sw_index = src_ring->sw_index;
- unsigned int write_index = src_ring->write_index;
- int err, i;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int err, i = 0;
spin_lock_bh(&ar_pci->ce_lock);
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) < n_items)) {
err = -ENOBUFS;
- goto unlock;
+ goto err;
}
for (i = 0; i < n_items - 1; i++) {
@@ -774,7 +793,7 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
items[i].transfer_id,
CE_SEND_FLAG_GATHER);
if (err)
- goto unlock;
+ goto err;
}
/* `i` is equal to `n_items - 1` after for() */
@@ -792,10 +811,15 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
items[i].transfer_id,
0);
if (err)
- goto unlock;
+ goto err;
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
- err = 0;
-unlock:
spin_unlock_bh(&ar_pci->ce_lock);
return err;
}
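The reworked error path above has to undo any scatter-gather entries already queued before releasing the CE lock, rather than leaving a half-submitted batch in the ring. A minimal userspace sketch of the same enqueue-with-rollback pattern, assuming hypothetical ring/enqueue/revert helpers rather than the real ath10k CE API:

#include <stdio.h>

#define RING_SIZE 8

struct ring {
	int entries[RING_SIZE];
	unsigned int write_index;
};

/* hypothetical enqueue: fails when the ring is full */
static int enqueue(struct ring *r, int item)
{
	if (r->write_index >= RING_SIZE)
		return -1;
	r->entries[r->write_index++] = item;
	return 0;
}

/* hypothetical revert: undo the most recent enqueue */
static void revert(struct ring *r)
{
	r->write_index--;
}

static int send_all(struct ring *r, const int *items, int n_items)
{
	int i;

	for (i = 0; i < n_items; i++) {
		if (enqueue(r, items[i]) != 0)
			goto err;
	}
	return 0;

err:
	/* roll back every entry queued so far, newest first */
	for (; i > 0; i--)
		revert(r);
	return -1;
}

int main(void)
{
	struct ring r = { .write_index = 0 };
	int items[10] = { 0 };

	/* 10 items cannot fit in an 8-entry ring: send_all rolls back */
	printf("send_all: %d, write_index after: %u\n",
	       send_all(&r, items, 10), r.write_index);
	return 0;
}

As in the patch, the loop counter is left pointing one past the last successful enqueue, so the rollback loop can simply count it back down to zero.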
@@ -803,6 +827,9 @@ unlock:
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
@@ -854,6 +881,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+
if (!force) {
int resources;
/*
@@ -880,7 +909,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +967,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
{
int ret = 0;
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+
/* polling for received messages not supported */
*dl_is_polled = 0;
@@ -997,6 +1028,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
{
int ul_is_polled, dl_is_polled;
+ ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
ul_pipe,
@@ -1098,6 +1131,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret, ret_early;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
@@ -1233,18 +1268,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_pci_pipe *pipe_info;
- int pipe_num;
+ int i;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
- pipe_info = &ar_pci->pipe_info[pipe_num];
- if (pipe_info->ce_hdl) {
- ath10k_ce_deinit(pipe_info->ce_hdl);
- pipe_info->ce_hdl = NULL;
- pipe_info->buf_sz = 0;
- }
- }
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_deinit_pipe(ar, i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1279,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
+
+ if (WARN_ON(!ar_pci->started))
+ return;
ret = ath10k_ce_disable_interrupts(ar);
if (ret)
@@ -1697,30 +1727,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
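The new helpers split copy-engine setup into a one-time allocation (done at probe) and a per-power-up initialisation, with matching deinit and free steps. A compilable sketch of that alloc/init/deinit/free lifecycle, using hypothetical names:

#include <stdlib.h>
#include <string.h>

struct pipe {
	void *ring;     /* long-lived buffer, allocated once at probe */
	int configured; /* re-initialised on every power cycle */
};

/* probe time: allocate long-lived resources once */
static int pipe_alloc(struct pipe *p, size_t ring_bytes)
{
	p->ring = malloc(ring_bytes);
	return p->ring ? 0 : -1;
}

/* power-up time: (re)initialise hardware-facing state */
static void pipe_init(struct pipe *p, size_t ring_bytes)
{
	memset(p->ring, 0, ring_bytes);
	p->configured = 1;
}

/* power-down time: quiesce, but keep the allocation */
static void pipe_deinit(struct pipe *p)
{
	p->configured = 0;
}

/* remove time: free the long-lived resources */
static void pipe_free(struct pipe *p)
{
	free(p->ring);
	p->ring = NULL;
}

int main(void)
{
	struct pipe p;

	if (pipe_alloc(&p, 64))
		return 1;
	pipe_init(&p, 64);   /* first power-up */
	pipe_deinit(&p);
	pipe_init(&p, 64);   /* warm restart reuses the allocation */
	pipe_deinit(&p);
	pipe_free(&p);
	return 0;
}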
static int ath10k_pci_ce_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
- int pipe_num;
+ int pipe_num, ret;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
+ pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num];
- pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
- if (pipe_info->ce_hdl == NULL) {
- ath10k_err("failed to initialize CE for pipe: %d\n",
- pipe_num);
-
- /* It is safe to call it here. It checks if ce_hdl is
- * valid for each pipe */
- ath10k_pci_ce_deinit(ar);
- return -1;
+ ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+ if (ret) {
+ ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+ pipe_num, ret);
+ return ret;
}
if (pipe_num == CE_COUNT - 1) {
@@ -1741,16 +1790,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- u32 fw_indicator_address, fw_indicator;
+ u32 fw_indicator;
ath10k_pci_wake(ar);
- fw_indicator_address = ar_pci->fw_indicator_address;
- fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+ fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_indicator & FW_IND_EVENT_PENDING) {
/* ACK: clear Target-side pending event */
- ath10k_pci_write32(ar, fw_indicator_address,
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_indicator & ~FW_IND_EVENT_PENDING);
if (ar_pci->started) {
@@ -1767,13 +1815,32 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
+/* this function effectively clears the target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+}
+
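ath10k_pci_warm_reset_si0() pulses the SI0 reset bit, reading the register back after each write before delaying. A standalone sketch of that assert/hold/deassert pulse against a stand-in register (the mask value here is made up, not the real SOC_RESET_CONTROL layout):

#include <stdint.h>
#include <unistd.h>

#define SI0_RST_MASK 0x01u /* hypothetical reset bit */

static volatile uint32_t fake_reset_reg; /* stand-in for the MMIO register */

static uint32_t reg_read(void)    { return fake_reset_reg; }
static void reg_write(uint32_t v) { fake_reset_reg = v; }

static void reset_pulse(void)
{
	uint32_t val;

	/* assert: set the reset bit, read back to flush the write */
	val = reg_read();
	reg_write(val | SI0_RST_MASK);
	(void)reg_read();

	usleep(10 * 1000); /* hold the line for ~10 ms */

	/* deassert: clear the bit, read back again */
	val = reg_read();
	reg_write(val & ~SI0_RST_MASK);
	(void)reg_read();

	usleep(10 * 1000);
}

int main(void)
{
	reset_pulse();
	return 0;
}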
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
ret = ath10k_do_pci_wake(ar);
if (ret) {
@@ -1801,7 +1868,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
msleep(100);
/* clear fw indicator */
- ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
/* clear target LF timer interrupts */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@@ -1826,6 +1893,8 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
SOC_RESET_CONTROL_ADDRESS);
msleep(10);
+ ath10k_pci_warm_reset_si0(ar);
+
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
@@ -1934,7 +2003,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
irq_mode = "legacy";
if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
- ath10k_info("pci irq %s\n", irq_mode);
+ ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
+ irq_mode, ath10k_pci_irq_mode,
+ ath10k_pci_reset_mode);
return 0;
@@ -1952,23 +2023,52 @@ err:
return ret;
}
+static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
+{
+ int i, ret;
+
+ /*
+ * Sometimes a warm reset succeeds after a few retries.
+ *
+ * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
+ * on the first try.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = __ath10k_pci_hif_power_up(ar, false);
+ if (ret == 0)
+ break;
+
+ ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
+ }
+
+ return ret;
+}
+
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
* preferred (and safer) way to perform a device reset is through a
* warm reset.
*
- * Warm reset doesn't always work though (notably after a firmware
- * crash) so fall back to cold reset if necessary.
+ * Warm reset doesn't always work though, so falling back to cold reset
+ * may be necessary.
*/
- ret = __ath10k_pci_hif_power_up(ar, false);
+ ret = ath10k_pci_hif_power_up_warm(ar);
if (ret) {
- ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+ ath10k_warn("failed to power up target using warm reset: %d\n",
ret);
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
+ return ret;
+
+ ath10k_warn("trying cold reset\n");
+
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
ath10k_err("failed to power up target using cold reset too (%d)\n",
@@ -1984,12 +2084,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+
ath10k_pci_free_early_irq(ar);
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
+ ath10k_pci_ce_deinit(ar);
ath10k_pci_warm_reset(ar);
- ath10k_pci_ce_deinit(ar);
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
@@ -2137,7 +2239,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 fw_ind;
int ret;
@@ -2148,14 +2249,11 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
return;
}
- fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+ fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
if (fw_ind & FW_IND_EVENT_PENDING) {
- ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
fw_ind & ~FW_IND_EVENT_PENDING);
-
- /* Some structures are unavailable during early boot or at
- * driver teardown so just print that the device has crashed. */
- ath10k_warn("device crashed - no diagnostics available\n");
+ ath10k_pci_hif_dump_area(ar);
}
ath10k_pci_sleep(ar);
@@ -2385,33 +2483,69 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int wait_limit = 300; /* 3 sec */
+ unsigned long timeout;
int ret;
+ u32 val;
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
ret = ath10k_pci_wake(ar);
if (ret) {
- ath10k_err("failed to wake up target: %d\n", ret);
+ ath10k_err("failed to wake up target for init: %d\n", ret);
return ret;
}
- while (wait_limit-- &&
- !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
- FW_IND_INITIALIZED)) {
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+ do {
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+
+ /* target should never return this */
+ if (val == 0xffffffff)
+ continue;
+
+ /* the device has crashed so don't bother trying anymore */
+ if (val & FW_IND_EVENT_PENDING)
+ break;
+
+ if (val & FW_IND_INITIALIZED)
+ break;
+
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
- iowrite32(PCIE_INTR_FIRMWARE_MASK |
- PCIE_INTR_CE_MASK_ALL,
- ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
- PCIE_INTR_ENABLE_ADDRESS));
+ ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK |
+ PCIE_INTR_CE_MASK_ALL);
+
mdelay(10);
- }
+ } while (time_before(jiffies, timeout));
- if (wait_limit < 0) {
- ath10k_err("target stalled\n");
+ if (val == 0xffffffff) {
+ ath10k_err("failed to read device register, device is gone\n");
ret = -EIO;
goto out;
}
+ if (val & FW_IND_EVENT_PENDING) {
+ ath10k_warn("device has crashed during init\n");
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+ val & ~FW_IND_EVENT_PENDING);
+ ath10k_pci_hif_dump_area(ar);
+ ret = -ECOMM;
+ goto out;
+ }
+
+ if (!(val & FW_IND_INITIALIZED)) {
+ ath10k_err("failed to receive initialized event from target: %08x\n",
+ val);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+
out:
ath10k_pci_sleep(ar);
return ret;
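Replacing the fixed 300-iteration loop with a jiffies deadline makes the wait track wall-clock time even when individual polls run long. A userspace analogue, with clock_gettime() standing in for jiffies/time_before() and an assumed value for the FW_IND_INITIALIZED bit:

#include <stdio.h>
#include <time.h>

#define TARGET_WAIT_MS 3000
#define FW_INITIALIZED 0x2 /* assumed bit value, for illustration only */

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* hypothetical register read: flip the return to test the success path */
static unsigned int read_fw_indicator(void)
{
	return 0; /* never reports FW_INITIALIZED here */
}

int main(void)
{
	long long deadline = now_ms() + TARGET_WAIT_MS;
	unsigned int val = 0;

	do {
		val = read_fw_indicator();
		if (val & FW_INITIALIZED)
			break;
		/* sleep briefly between polls, like the mdelay(10) above */
		nanosleep(&(struct timespec){ .tv_nsec = 10000000 }, NULL);
	} while (now_ms() < deadline);

	if (!(val & FW_INITIALIZED))
		printf("timed out waiting for init\n");
	return 0;
}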
@@ -2422,6 +2556,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
int i, ret;
u32 val;
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
+
ret = ath10k_do_pci_wake(ar);
if (ret) {
ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2589,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
}
ath10k_do_pci_sleep(ar);
+
+ ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
return 0;
}
@@ -2484,7 +2623,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k_pci *ar_pci;
u32 lcr_val, chip_id;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
if (ar_pci == NULL)
@@ -2503,7 +2642,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
- if (ath10k_target_ps)
+ if (ath10k_pci_target_ps)
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
ath10k_pci_dump_features(ar_pci);
@@ -2516,23 +2655,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ar_pci->ar = ar;
- ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
atomic_set(&ar_pci->keep_awake_count, 0);
pci_set_drvdata(pdev, ar);
- /*
- * Without any knowledge of the Host, the Target may have been reset or
- * power cycled and its Config Space may no longer reflect the PCI
- * address space that was assigned earlier by the PCI infrastructure.
- * Refresh it now.
- */
- ret = pci_assign_resource(pdev, BAR_NUM);
- if (ret) {
- ath10k_err("failed to assign PCI space: %d\n", ret);
- goto err_ar;
- }
-
ret = pci_enable_device(pdev);
if (ret) {
ath10k_err("failed to enable PCI device: %d\n", ret);
@@ -2594,16 +2720,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_do_pci_sleep(ar);
+ ret = ath10k_pci_alloc_ce(ar);
+ if (ret) {
+ ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+ goto err_iomap;
+ }
+
ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err("failed to register driver core: %d\n", ret);
- goto err_iomap;
+ goto err_free_ce;
}
return 0;
+err_free_ce:
+ ath10k_pci_free_ce(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@@ -2626,7 +2760,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci;
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
@@ -2636,9 +2770,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
if (!ar_pci)
return;
- tasklet_kill(&ar_pci->msi_fw_err);
-
ath10k_core_unregister(ar);
+ ath10k_pci_free_ce(ar);
pci_iounmap(pdev, ar_pci->mem);
pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2813,5 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index b43fdb4f7319..dfdebb4157aa 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -189,9 +189,6 @@ struct ath10k_pci {
struct ath10k_hif_cb msg_callbacks_current;
- /* Target address used to signal a pending firmware event */
- u32 fw_indicator_address;
-
/* Copy Engine used for Diagnostic Accesses */
struct ath10k_ce_pipe *ce_diag;
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 0541dd939ce9..82669a77e553 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -100,189 +100,6 @@ exit:
wake_up(&htt->empty_tx_wq);
}
-static const u8 rx_legacy_rate_idx[] = {
- 3, /* 0x00 - 11Mbps */
- 2, /* 0x01 - 5.5Mbps */
- 1, /* 0x02 - 2Mbps */
- 0, /* 0x03 - 1Mbps */
- 3, /* 0x04 - 11Mbps */
- 2, /* 0x05 - 5.5Mbps */
- 1, /* 0x06 - 2Mbps */
- 0, /* 0x07 - 1Mbps */
- 10, /* 0x08 - 48Mbps */
- 8, /* 0x09 - 24Mbps */
- 6, /* 0x0A - 12Mbps */
- 4, /* 0x0B - 6Mbps */
- 11, /* 0x0C - 54Mbps */
- 9, /* 0x0D - 36Mbps */
- 7, /* 0x0E - 18Mbps */
- 5, /* 0x0F - 9Mbps */
-};
-
-static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
- enum ieee80211_band band,
- struct ieee80211_rx_status *status)
-{
- u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
- u8 info0 = info->rate.info0;
- u32 info1 = info->rate.info1;
- u32 info2 = info->rate.info2;
- u8 preamble = 0;
-
- /* Check if valid fields */
- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
- return;
-
- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
-
- switch (preamble) {
- case HTT_RX_LEGACY:
- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
- rate_idx = 0;
-
- if (rate < 0x08 || rate > 0x0F)
- break;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- if (cck)
- rate &= ~BIT(3);
- rate_idx = rx_legacy_rate_idx[rate];
- break;
- case IEEE80211_BAND_5GHZ:
- rate_idx = rx_legacy_rate_idx[rate];
- /* We are using same rate table registering
- HW - ath10k_rates[]. In case of 5GHz skip
- CCK rates, so -4 here */
- rate_idx -= 4;
- break;
- default:
- break;
- }
-
- status->rate_idx = rate_idx;
- break;
- case HTT_RX_HT:
- case HTT_RX_HT_WITH_TXBF:
- /* HT-SIG - Table 20-11 in info1 and info2 */
- mcs = info1 & 0x1F;
- nss = mcs >> 3;
- bw = (info1 >> 7) & 1;
- sgi = (info2 >> 7) & 1;
-
- status->rate_idx = mcs;
- status->flag |= RX_FLAG_HT;
- if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
- if (bw)
- status->flag |= RX_FLAG_40MHZ;
- break;
- case HTT_RX_VHT:
- case HTT_RX_VHT_WITH_TXBF:
- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
- TODO check this */
- mcs = (info2 >> 4) & 0x0F;
- nss = ((info1 >> 10) & 0x07) + 1;
- bw = info1 & 3;
- sgi = info2 & 1;
-
- status->rate_idx = mcs;
- status->vht_nss = nss;
-
- if (sgi)
- status->flag |= RX_FLAG_SHORT_GI;
-
- switch (bw) {
- /* 20MHZ */
- case 0:
- break;
- /* 40MHZ */
- case 1:
- status->flag |= RX_FLAG_40MHZ;
- break;
- /* 80MHZ */
- case 2:
- status->vht_flag |= RX_VHT_FLAG_80MHZ;
- }
-
- status->flag |= RX_FLAG_VHT;
- break;
- default:
- break;
- }
-}
-
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
-{
- struct ieee80211_rx_status *status;
- struct ieee80211_channel *ch;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
-
- status = IEEE80211_SKB_RXCB(info->skb);
- memset(status, 0, sizeof(*status));
-
- if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
- status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(
- __le16_to_cpu(hdr->frame_control) &
- ~IEEE80211_FCTL_PROTECTED);
- }
-
- if (info->mic_err)
- status->flag |= RX_FLAG_MMIC_ERROR;
-
- if (info->fcs_err)
- status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
- if (info->amsdu_more)
- status->flag |= RX_FLAG_AMSDU_MORE;
-
- status->signal = info->signal;
-
- spin_lock_bh(&ar->data_lock);
- ch = ar->scan_channel;
- if (!ch)
- ch = ar->rx_channel;
- spin_unlock_bh(&ar->data_lock);
-
- if (!ch) {
- ath10k_warn("no channel configured; ignoring frame!\n");
- dev_kfree_skb_any(info->skb);
- return;
- }
-
- process_rx_rates(ar, info, ch->band, status);
- status->band = ch->band;
- status->freq = ch->center_freq;
-
- if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
- /* TSF available only in 32-bit */
- status->mactime = info->tsf & 0xffffffff;
- status->flag |= RX_FLAG_MACTIME_END;
- }
-
- ath10k_dbg(ATH10K_DBG_DATA,
- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
- info->skb,
- info->skb->len,
- status->flag == 0 ? "legacy" : "",
- status->flag & RX_FLAG_HT ? "ht" : "",
- status->flag & RX_FLAG_VHT ? "vht" : "",
- status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
- status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
- status->rate_idx,
- status->vht_nss,
- status->freq,
- status->band, status->flag, info->fcs_err);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
- info->skb->data, info->skb->len);
-
- ieee80211_rx(ar->hw, info->skb);
-}
-
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index 356dc9c04c9e..aee3e20058f8 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -21,7 +21,6 @@
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb1f7b5bcf4c..4b7782a529ac 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -639,6 +639,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *wmi_skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int len;
+ u32 buf_len = skb->len;
u16 fc;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -648,6 +649,15 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
return -EINVAL;
len = sizeof(cmd->hdr) + skb->len;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
len = round_up(len, 4);
wmi_skb = ath10k_wmi_alloc_skb(len);
@@ -659,7 +669,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
cmd->hdr.tx_rate = 0;
cmd->hdr.tx_power = 0;
- cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(cmd->buf, skb->data, skb->len);
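The buf_len fix above reserves room for the CCMP MIC on protected robust management frames (action, deauth, disassoc), since the firmware appends the MIC itself. A self-contained sketch of that length computation; the frame-control masks mirror the ieee80211_is_*() helpers and IEEE80211_CCMP_MIC_LEN is 8 bytes in mainline, but treat both as assumptions here:

#include <stdint.h>
#include <stdio.h>

#define CCMP_MIC_LEN 8 /* IEEE80211_CCMP_MIC_LEN in mainline */

#define FC_TYPE_SUBTYPE_MASK 0x00fc
#define FC_MGMT_ACTION       0x00d0
#define FC_MGMT_DEAUTH       0x00c0
#define FC_MGMT_DISASSOC     0x00a0
#define FC_PROTECTED         0x4000

/* returns the buffer length to reserve for a frame of fc/frame_len */
static uint32_t mgmt_tx_buf_len(uint16_t fc, uint32_t frame_len)
{
	uint16_t st = fc & FC_TYPE_SUBTYPE_MASK;
	int robust = (st == FC_MGMT_ACTION ||
		      st == FC_MGMT_DEAUTH ||
		      st == FC_MGMT_DISASSOC);

	/* firmware appends the MIC, so reserve room for it up front */
	if (robust && (fc & FC_PROTECTED))
		return frame_len + CCMP_MIC_LEN;
	return frame_len;
}

int main(void)
{
	/* protected deauth: body length plus 8 bytes for the MIC */
	printf("%u\n", (unsigned)mgmt_tx_buf_len(FC_MGMT_DEAUTH | FC_PROTECTED, 40));
	/* unprotected action frame: unchanged */
	printf("%u\n", (unsigned)mgmt_tx_buf_len(FC_MGMT_ACTION, 40));
	return 0;
}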
@@ -957,10 +967,16 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
* frames with Protected Bit set. */
if (ieee80211_has_protected(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control)) {
- status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
- hdr->frame_control = __cpu_to_le16(fc &
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_action(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
~IEEE80211_FCTL_PROTECTED);
+ }
}
ath10k_dbg(ATH10K_DBG_MGMT,
@@ -1362,13 +1378,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct sk_buff *bcn;
int ret, vdev_id = 0;
- ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
-
ev = (struct wmi_host_swba_event *)skb->data;
map = __le32_to_cpu(ev->vdev_map);
- ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
- "-vdev map 0x%x\n",
+ ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
ev->vdev_map);
for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1398,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
bcn_info = &ev->bcn_info[i];
ath10k_dbg(ATH10K_DBG_MGMT,
- "-bcn_info[%d]:\n"
- "--tim_len %d\n"
- "--tim_mcast %d\n"
- "--tim_changed %d\n"
- "--tim_num_ps_pending %d\n"
- "--tim_bitmap 0x%08x%08x%08x%08x\n",
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
i,
__le32_to_cpu(bcn_info->tim_info.tim_len),
__le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -1439,6 +1447,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
+ arvif->beacon = NULL;
}
ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
@@ -1448,6 +1457,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ATH10K_SKB_CB(bcn)->paddr);
if (ret) {
ath10k_warn("failed to map beacon: %d\n", ret);
+ dev_kfree_skb_any(bcn);
goto skip;
}
@@ -2365,7 +2375,7 @@ void ath10k_wmi_detach(struct ath10k *ar)
ar->wmi.num_mem_chunks = 0;
}
-int ath10k_wmi_connect_htc_service(struct ath10k *ar)
+int ath10k_wmi_connect(struct ath10k *ar)
{
int status;
struct ath10k_htc_svc_conn_req conn_req;
@@ -2393,8 +2403,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
return 0;
}
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g)
+static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+ u16 rd2g, u16 rd5g, u16 ctl2g,
+ u16 ctl5g)
{
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
@@ -2418,6 +2429,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
+static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+ u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+ return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+ ctl2g, ctl5g, dfs_reg);
+ else
+ return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+ ctl2g, ctl5g);
+}
+
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *arg)
{
@@ -3456,8 +3507,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi peer assoc vdev %d addr %pM\n",
- arg->vdev_id, arg->addr);
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f51d5ca0141f..e93df2c10413 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
} __packed;
} __packed;
-/* macro to convert MAC address from WMI word format to char array */
-#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
- (c_macaddr)[0] = ((pwmi_mac_addr)->word0) & 0xff; \
- (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
- (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
- (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
- (c_macaddr)[4] = ((pwmi_mac_addr)->word1) & 0xff; \
- (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
- } while (0)
-
struct wmi_cmd_map {
u32 init_cmdid;
u32 start_scan_cmdid;
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
__le32 conformance_test_limit_5G;
} __packed;
+enum wmi_dfs_region {
+ /* Uninitialized dfs domain */
+ WMI_UNINIT_DFS_DOMAIN = 0,
+
+ /* FCC3 dfs domain */
+ WMI_FCC_DFS_DOMAIN = 1,
+
+ /* ETSI dfs domain */
+ WMI_ETSI_DFS_DOMAIN = 2,
+
+ /*Japan dfs domain */
+ WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+
+ /* dfs domain from wmi_dfs_region */
+ __le32 dfs_domain;
+} __packed;
+
/* Command to set/unset chip in quiet mode */
struct wmi_pdev_set_quiet_cmd {
/* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
};
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED 1
+#define WMI_RTSCTS_SET_MASK 0x0f
+#define WMI_RTSCTS_SET_LSB 0
+
+#define WMI_RTSCTS_PROFILE_MASK 0xf0
+#define WMI_RTSCTS_PROFILE_LSB 4
+
enum wmi_beacon_gen_mode {
WMI_BEACON_STAGGERED_MODE = 0,
WMI_BEACON_BURST_MODE = 1
@@ -2295,9 +2323,9 @@ struct wmi_pdev_param_map {
#define WMI_PDEV_PARAM_UNSUPPORTED 0
enum wmi_pdev_param {
- /* TX chian mask */
+ /* TX chain mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
- /* RX chian mask */
+ /* RX chain mask */
WMI_PDEV_PARAM_RX_CHAIN_MASK,
/* TX power limit for 2G Radio */
WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
/* wal pdev resets */
__le32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
__le32 phy_underrun;
/* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
WMI_REQUEST_AP_STAT = 0x02
};
+struct wlan_inst_rssi_args {
+ __le16 cfg_retry_count;
+ __le16 retry_count;
+};
+
struct wmi_request_stats_cmd {
__le32 stats_id;
- /*
- * Space to add parameters like
- * peer mac addr
- */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* Instantaneous RSSI arguments */
+ struct wlan_inst_rssi_args inst_rssi_args;
} __packed;
/* Suspend option */
@@ -2795,7 +2834,18 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
-struct wmi_pdev_stats {
+struct wmi_pdev_stats_old {
+ __le32 chan_nf; /* Channel noise floor */
+ __le32 tx_frame_count; /* TX frame count */
+ __le32 rx_frame_count; /* RX frame count */
+ __le32 rx_clear_count; /* rx clear count */
+ __le32 cycle_count; /* cycle count */
+ __le32 phy_err_count; /* Phy error count */
+ __le32 chan_tx_pwr; /* channel tx power */
+ struct wal_dbg_stats wal; /* WAL dbg stats */
+} __packed;
+
+struct wmi_pdev_stats_10x {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
@@ -2804,6 +2854,12 @@ struct wmi_pdev_stats {
__le32 phy_err_count; /* Phy error count */
__le32 chan_tx_pwr; /* channel tx power */
struct wal_dbg_stats wal; /* WAL dbg stats */
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+ __le32 fcs_bad;
+ __le32 no_beacons;
+ __le32 mib_int_count;
} __packed;
/*
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
-struct wmi_peer_stats {
+struct wmi_peer_stats_old {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_peer_stats_10x {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
+ __le32 peer_rx_rate;
} __packed;
struct wmi_vdev_create_cmd {
@@ -4196,13 +4259,14 @@ void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_connect(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *);
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
- u16 rd5g, u16 ctl2g, u16 ctl5g);
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a2973b7acf2..0fce1c76638e 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
AR5K_TPC);
} else {
- ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
- AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+ ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
+ AR5K_PHY_TXPOWER_RATE_MAX);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index e39e5860a2e9..9c125ff083f7 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -1,11 +1,19 @@
config ATH6KL
tristate "Atheros mobile chipsets support"
+ depends on CFG80211
+ ---help---
+ This module adds core support for wireless adapters based on
+ Atheros AR6003 and AR6004 chipsets. You still need separate
+ bus drivers for USB and SDIO to be able to use real devices.
+
+ If you choose to build it as a module, it will be called
+ ath6kl_core. Please note that AR6002 and AR6001 are not
+ supported by this driver.
config ATH6KL_SDIO
tristate "Atheros ath6kl SDIO support"
depends on ATH6KL
depends on MMC
- depends on CFG80211
---help---
This module adds support for wireless adapters based on
Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
tristate "Atheros ath6kl USB support"
depends on ATH6KL
depends on USB
- depends on CFG80211
---help---
This module adds support for wireless adapters based on
- Atheros AR6004 chipset running over USB. This is still under
- implementation and it isn't functional. If you choose to
- build it as a module, it will be called ath6kl_usb.
+ Atheros AR6004 chipset and chipsets based on it running over
+ USB. If you choose to build it as a module, it will be
+ called ath6kl_usb.
config ATH6KL_DEBUG
bool "Atheros ath6kl debugging"
depends on ATH6KL
---help---
- Enables debug support
+ Enables ath6kl debug support, including debug messages
+ enabled with the debug_mask module parameter, and a
+ debugfs interface.
+
+ If unsure, say Y to make it easier to debug problems.
config ATH6KL_TRACING
bool "Atheros ath6kl tracing support"
depends on ATH6KL
depends on EVENT_TRACING
---help---
- Select this to ath6kl use tracing infrastructure.
+ Select this to make ath6kl use the tracing infrastructure
+ which, for example, can be enabled with the help of
+ trace-cmd. All debug messages and commands are delivered
+ using individually enablable trace points.
If unsure, say Y to make it easier to debug problems.
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
Enabling this makes it possible to change the regdomain in
the firmware. This can be only enabled if regulatory requirements
are taken into account.
+
+ If unsure, say N.
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index c2c6f4604958..0e26f4a34fda 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"added bss %pM to cfg80211\n", bssid);
kfree(ie);
- } else
+ } else {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
+ }
return bss;
}
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
ssid_list[i].flag,
ssid_list[i].ssid.ssid_len,
ssid_list[i].ssid.ssid);
-
}
/* Make sure no old entries are left behind */
@@ -1759,7 +1759,7 @@ static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
}
static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
/* Configure the patterns that we received from the user. */
for (i = 0; i < wow->n_patterns; i++) {
-
/*
* Convert given nl80211 specific mask value to equivalent
* driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
ath6kl_set_cipher(vif, 0, true);
- } else if (info->crypto.n_ciphers_pairwise == 1)
+ } else if (info->crypto.n_ciphers_pairwise == 1) {
ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
+ }
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
if (info->inactivity_timeout) {
-
inactivity_timeout = info->inactivity_timeout;
if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
@@ -2975,7 +2974,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac)
+ const u8 *mac)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
@@ -2986,7 +2985,8 @@ static int ath6kl_del_station(struct wiphy *wiphy, struct net_device *dev,
}
static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_parameters *params)
+ const u8 *mac,
+ struct station_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4b46adbe8c92..b0b652042760 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
-MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \
- "polling. This also specifies the polling interval in" \
- "msecs. Set reocvery_enable for this to be effective");
+MODULE_PARM_DESC(heart_beat_poll,
+ "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
+
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index dbfd17d0a5fa..55c4064dd506 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_enable_reg)
{
-
ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
"GMBOX lookahead alias 1: 0x%x\n",
irq_proc_reg->rx_gmbox_lkahd_alias[1]);
}
-
}
if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
-
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
-
struct ath6kl *ar = file->private_data;
struct ath6kl_vif *vif;
char buf[100];
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index ca9ba005f287..e194c10d9f00 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
struct ath6kl_irq_proc_registers *irq_proc_reg,
struct ath6kl_irq_enable_reg *irq_en_reg)
{
-
}
+
static inline void dump_cred_dist_stats(struct htc_target *target)
{
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index fea7709b5dda..18c070850a09 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
buf = req->virt_dma_buf;
for (i = 0; i < req->scat_entries; i++) {
-
if (from_dma)
memcpy(req->scat_list[i].buf, buf,
req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
le32_to_cpu(regdump_val[i + 2]),
le32_to_cpu(regdump_val[i + 3]));
}
-
}
static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
fail_setup:
return status;
-
}
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 61f6b21fb0ae..dc6bd8cd9b83 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -197,9 +197,9 @@ struct hif_scatter_req {
/* bounce buffer for upper layers to copy to/from */
u8 *virt_dma_buf;
- struct hif_scatter_item scat_list[1];
-
u32 scat_q_depth;
+
+ struct hif_scatter_item scat_list[0];
};
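Turning scat_list into a zero-length (flexible) trailing array is why the sdio allocation later in this patch switches from (n_scat_entry - 1) to n_scat_entry items: the struct size no longer includes a built-in first element. A standalone sketch of sizing such a structure:

#include <stdio.h>
#include <stdlib.h>

struct item {
	void *buf;
	int len;
};

struct scatter_req {
	int entries;
	struct item list[]; /* flexible array member, adds no size itself */
};

static struct scatter_req *scatter_req_alloc(int n)
{
	/* sizeof(*req) no longer counts any list entries, so add all n */
	struct scatter_req *req =
		malloc(sizeof(*req) + n * sizeof(struct item));

	if (req)
		req->entries = n;
	return req;
}

int main(void)
{
	struct scatter_req *req = scatter_req_alloc(4);

	if (!req)
		return 1;
	printf("header %zu + 4 items of %zu bytes each\n",
	       sizeof(*req), sizeof(struct item));
	free(req);
	return 0;
}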
struct ath6kl_irq_proc_registers {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 65e5b719093d..e481f14b9878 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
if (cur_ep_dist->endpoint == ENDPOINT_0)
continue;
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
- else {
+ } else {
/*
* For the remaining data endpoints, we assume that
* each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count;
-
}
ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
enum htc_endpoint_id eid, unsigned int len,
int *req_cred)
{
-
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
unsigned int len;
while (true) {
-
flags = 0;
if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
-
if (list_empty(&endpoint->txq))
break;
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
list_add_tail(&packet->list, &container);
htc_tx_complete(endpoint, &container);
}
-
}
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
ep_cb = ep->ep_cb;
for (j = 0; j < n_msg; j++) {
-
/*
* Reset flag, any packets allocated using the
* rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
}
}
- if (list_empty(&ep->rx_bufq))
+ if (list_empty(&ep->rx_bufq)) {
packet = NULL;
- else {
+ } else {
packet = list_first_entry(&ep->rx_bufq,
struct htc_packet, list);
list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
spin_lock_bh(&target->rx_lock);
for (i = 0; i < msg; i++) {
-
htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
lk_ahd = (struct htc_lookahead_report *) record_buf;
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
next_lk_ahds) {
-
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
}
return 0;
-
}
static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
status = 0;
while (len > 0) {
-
if (len < sizeof(struct htc_record_hdr)) {
status = -ENOMEM;
break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
}
if (!fetched_pkts) {
-
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
look_aheads[0] = msg_look_ahead;
while (true) {
-
/*
* First lookahead sets the expected endpoint IDs for all
* packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
packet->buf = packet->buf_start;
packet->endpoint = ENDPOINT_0;
list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
- } else
+ } else {
list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
}
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 67aa924ed8b3..756fe52a12c8 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
credits_required = 0;
} else {
-
if (ep->cred_dist.credits < credits_required)
break;
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* queue this packet into the caller's queue */
list_add_tail(&packet->list, queue);
}
-
}
static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
list_add(&packet->list, pkt_queue);
break;
}
-
}
if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
*/
list_for_each_entry_safe(packet, tmp_pkt,
txq, list) {
-
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: Indicat overflowed TX pkts: %p\n",
__func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
list_move_tail(&packet->list,
&send_queue);
}
-
}
if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
* enough transmit resources.
*/
while (true) {
-
if (get_queue_depth(&ep->txq) == 0)
break;
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
}
spin_lock_bh(&target->tx_lock);
-
}
+
/* done with this endpoint, we can clear the count */
ep->tx_proc_cnt = 0;
spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
dev_kfree_skb(skb);
return status;
-
}
static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
tx_alloc = 0;
} else {
-
tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
if (tx_alloc == 0) {
status = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 4f316bdcbab5..d5ef211f261c 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
if (board_ext_address &&
ar->fw_board_len == (board_data_size + board_ext_data_size)) {
-
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
"writing extended board data to 0x%x (%d B)\n",
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5839fc23bdc7..d56554674da4 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
{
-
struct ath6kl *ar = vif->ar;
vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
{
-
struct ath6kl_vif *vif;
int res = 0;
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
- } else
+ } else {
ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
-
+ }
}
static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
- } else
+ } else {
netif_carrier_off(dev);
+ }
return 0;
}
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
dev->features = features | NETIF_F_RXCSUM;
return err;
}
-
}
return err;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 7126bdd4236c..339d89f14d32 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
int i, scat_req_sz, scat_list_sz, size;
u8 *virt_buf;
- scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+ scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
scat_req_sz = sizeof(*s_req) + scat_list_sz;
if (!virt_scat)
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
memcpy(tbuf, buf, len);
bounced = true;
- } else
+ } else {
tbuf = buf;
+ }
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
struct bus_request *req)
{
- if (req->scat_req)
+ if (req->scat_req) {
ath6kl_sdio_scat_rw(ar_sdio, req);
- else {
+ } else {
void *context;
int status;
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
list_add_tail(&s_req->list, &ar_sdio->scat_req);
spin_unlock_bh(&ar_sdio->scat_lock);
-
}
/* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
- if (request & HIF_SYNCHRONOUS)
+ if (request & HIF_SYNCHRONOUS) {
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
- else {
+ } else {
spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
(!ar->suspend_mode && wow)) {
-
ret = ath6kl_set_sdio_pm_caps(ar);
if (ret)
goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
!ar->suspend_mode || try_deepsleep) {
-
flags = sdio_get_host_pm_caps(func);
if (!(flags & MMC_PM_KEEP_POWER))
goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
/*
* Hit the credit counter with a 4-byte access, the first byte
* read will hit the counter and cause a decrement, while the
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a580a629a0da..d5eeeae7711b 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -289,7 +289,7 @@ struct host_interest {
u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
/* test applications flags */
- u32 hi_test_apps_related ; /* 0xdc */
+ u32 hi_test_apps_related; /* 0xdc */
/* location of test script */
u32 hi_ota_testscript; /* 0xe0 */
/* location of CAL data */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index ebb24045a8ae..40432fe7a5d2 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
spin_unlock_bh(&conn->psq_lock);
return false;
- } else if (!conn->apsd_info)
+ } else if (!conn->apsd_info) {
return false;
+ }
if (test_bit(WMM_ENABLED, &vif->flags)) {
ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
cookie = NULL;
ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
skb, skb->len);
- } else
+ } else {
cookie = ath6kl_alloc_cookie(ar);
+ }
if (cookie == NULL) {
spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
- u8 ac = 99 ; /* initialize to unmapped ac */
+ u8 ac = 99; /* initialize to unmapped ac */
bool chk_adhoc_ps_mapping = false;
int ret;
struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
if (ret)
goto fail_tx;
}
- } else
+ } else {
goto fail_tx;
+ }
spin_lock_bh(&ar->lock);
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
/* reap completed packets */
while (!list_empty(packet_queue)) {
-
packet = list_first_entry(packet_queue, struct htc_packet,
list);
list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
else
skb_queue_tail(&rxtid->q, node->skb);
node->skb = NULL;
- } else
+ } else {
stats->num_hole++;
+ }
rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
return is_queued;
spin_lock_bh(&rxtid->lock);
- for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
if (rxtid->hold_q[idx].skb) {
/*
* There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
is_apsdq_empty_at_start = is_apsdq_empty;
while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
-
spin_lock_bh(&conn->psq_lock);
skb = skb_dequeue(&conn->apsdq);
is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
if (!conn)
return;
aggr_conn = conn->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
is_amsdu, skb)) {
/* aggregation code will handle the skb */
return;
}
- } else if (!is_broadcast_ether_addr(datap->h_dest))
+ } else if (!is_broadcast_ether_addr(datap->h_dest)) {
vif->net_stats.multicast++;
+ }
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (!aggr_conn)
return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
}
-
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
sta = ath6kl_find_sta_by_aid(vif->ar, aid);
if (sta)
aggr_conn = sta->aggr_conn;
- } else
+ } else {
aggr_conn = vif->aggr_cntxt->aggr_conn;
+ }
if (!aggr_conn)
return;
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 56c3fd5cef65..3afc5a463d06 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
break;
kfree(urb_context);
}
-
}
static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
-
}
static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 8b4ce28e3ce8..4d7f9e4712e9 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
sizeof(struct ath6kl_llc_snap_hdr),
layer2_priority);
- } else
+ } else {
usr_pri = layer2_priority & 0x7;
+ }
/*
* Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
sizeof(u32));
skb_pull(skb, hdr_size);
- } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+ } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+ }
datap = skb->data;
llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
{
-
struct ath6kl_wmi_regdomain *ev;
struct country_code_to_enum_rd *country = NULL;
struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
ev = (struct ath6kl_wmi_regdomain *) datap;
reg_code = le32_to_cpu(ev->reg_code);
- if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+ if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
country = ath6kl_regd_find_country((u16) reg_code);
- else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
-
+ } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
regpair = ath6kl_get_regpair((u16) reg_code);
country = ath6kl_regd_find_country_by_rd((u16) reg_code);
if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
-
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
* for delete qos stream from AP
*/
else if (reply->cac_indication == CAC_INDICATION_DELETE) {
-
ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
tsinfo = le16_to_cpu(ts->tsinfo);
ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2322,7 +2320,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
return ret;
}
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk)
{
struct sk_buff *skb;
struct wmi_add_krk_cmd *cmd;
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
-
if (WARN_ON(!data_sync_bufs[index].skb))
goto free_data_skb;
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
for (i = 0; i < WMM_NUM_AC; i++) {
if (stream_exist & (1 << i)) {
-
/*
* FIXME: Is this lock & unlock inside
* for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
cmd->asleep = cpu_to_le32(1);
- } else
+ } else {
cmd->awake = cpu_to_le32(1);
+ }
ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
WMI_SET_HOST_SLEEP_MODE_CMDID,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 5c702ae4d9f8..bb23fc00111d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
* flags here
*/
enum wmi_scan_ctrl_flags_bits {
-
/* set if can scan in the connect cmd */
CONNECT_SCAN_CTRL_FLAGS = 0x01,
@@ -2617,7 +2616,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, const u8 *krk);
int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set);
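
Const-qualifying krk documents that the key bytes are only read and lets callers pass const buffers without a cast. A small sketch of the pattern, with hypothetical names:

#include <stdint.h>
#include <string.h>

/* Hypothetical command builder that only reads the key bytes; taking
 * "const uint8_t *" states that and accepts const caller data.
 */
struct krk_cmd {
	uint8_t krk[16];
};

static void build_krk_cmd(struct krk_cmd *cmd, const uint8_t *krk)
{
	memcpy(cmd->krk, krk, sizeof(cmd->krk));	/* read-only use */
}

int main(void)
{
	static const uint8_t key[16] = { 0 };	/* const caller data */
	struct krk_cmd cmd;

	build_krk_cmd(&cmd, key);	/* legal only with the const prototype */
	return 0;
}
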
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 8e1c7b0fe76c..8fcd586d1c39 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -53,7 +53,8 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
ath9k_common-y:= common.o \
common-init.o \
- common-beacon.o
+ common-beacon.o \
+ common-debug.o
ath9k_htc-y += htc_hst.o \
hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 0a6163e9248c..c38399bc9aa9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
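
These initvals tables are {register, value} pairs replayed verbatim during hardware reset, so the 0x0d261820 -> 0x0d261800 edit (repeated for the other chips below) changes what gets programmed into register 0x00009e40. A hedged sketch of how such a table is typically consumed (helper names are illustrative, not the ath9k API):

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t baseband_core[][2] = {
	{ 0x00009e38, 0x0cc80c00 },
	{ 0x00009e40, 0x0d261800 },	/* the word this patch changes */
	{ 0x00009e4c, 0x00001004 },
};

/* Stand-in for a register write; real init code pokes MMIO here. */
static void reg_write(uint32_t reg, uint32_t val)
{
	printf("REG_WRITE(0x%08" PRIx32 ", 0x%08" PRIx32 ")\n", reg, val);
}

int main(void)
{
	size_t i;

	/* Replay every {reg, value} pair, as a hw reset path would. */
	for (i = 0; i < sizeof(baseband_core) / sizeof(baseband_core[0]); i++)
		reg_write(baseband_core[i][0], baseband_core[i][1]);
	return 0;
}
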
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f76139bbb74f..2c42ff05efa3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0ac8be96097f..2154efcd3900 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009fc0, 0x803e4788},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index a01f0edb6518..b995ffe88b33 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x210d0401},
- {0x0000a3a0, 0xab9a7144},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa},
{0x0000a3ac, 0x3c466478},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 3c9113d9b1bc..8e5c3b9786e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
{0x0000a370, 0x00000000},
{0x0000a390, 0x00000001},
{0x0000a394, 0x00000444},
- {0x0000a398, 0x1f020503},
- {0x0000a39c, 0x29180c03},
- {0x0000a3a0, 0x9a8b6844},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
{0x0000a3a4, 0x000000ff},
{0x0000a3a8, 0x6a6a6a6a},
{0x0000a3ac, 0x6a6a6a6a},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index e6aec2c0207f..a5ca65240af3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x0d261800},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
{0x00009e54, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3ba03dde4215..2ca8f7e06174 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -23,8 +23,8 @@
#include <linux/leds.h>
#include <linux/completion.h>
-#include "debug.h"
#include "common.h"
+#include "debug.h"
#include "mci.h"
#include "dfs.h"
#include "spectral.h"
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TXFIFO_DEPTH 8
#define ATH_TX_ERROR 0x01
+/* Stop tx traffic 1ms before the GO goes away */
+#define ATH_P2P_PS_STOP_TIME 1000
+
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
#define IEEE80211_WEP_IVLEN 3
@@ -271,6 +274,7 @@ struct ath_node {
#ifdef CONFIG_ATH9K_STATION_STATISTICS
struct ath_rx_rate_stats rx_rate_stats;
#endif
+ u8 key_idx[4];
};
struct ath_tx_control {
@@ -366,11 +370,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
/********/
struct ath_vif {
+ struct ieee80211_vif *vif;
struct ath_node mcast_node;
int av_bslot;
bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
struct ath_buf *av_bcbuf;
+
+ /* P2P Client */
+ struct ieee80211_noa_data noa;
};
struct ath9k_vif_iter_data {
@@ -463,6 +471,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
void ath_ps_full_sleep(unsigned long data);
+void ath9k_p2p_ps_timer(void *priv);
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
/**********/
/* BTCOEX */
@@ -713,6 +723,9 @@ struct ath_softc {
struct completion paprd_complete;
wait_queue_head_t tx_wait;
+ struct ath_gen_timer *p2p_ps_timer;
+ struct ath_vif *p2p_ps_vif;
+
unsigned long driver_data;
u8 gtt_cnt;
@@ -757,6 +770,7 @@ struct ath_softc {
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
struct dfs_pattern_detector *dfs_detector;
+ u64 dfs_prev_pulse_ts;
u32 wow_enabled;
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
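
The new ath9k.h fields wire up P2P client powersave: per-vif NoA state plus a generic timer, with ATH_P2P_PS_STOP_TIME in microseconds so TX stops 1 ms before the GO's advertised absence. A hedged sketch of the arming arithmetic only (the helper name and TSF handling are assumptions; the real scheduling code is outside this hunk):

#include <stdint.h>
#include <stdio.h>

#define ATH_P2P_PS_STOP_TIME 1000	/* us before GO absence, per the patch */

/* Hypothetical helper: pick the TSF value (us) at which to stop TX,
 * given the next NoA absence start.
 */
static uint32_t p2p_ps_stop_tsf(uint32_t noa_start_tsf)
{
	return noa_start_tsf - ATH_P2P_PS_STOP_TIME;
}

int main(void)
{
	uint32_t noa_start = 5000000;	/* example absence start, 5 s TSF */

	printf("arm timer at TSF %u us\n",
	       (unsigned int)p2p_ps_stop_tsf(noa_start));
	return 0;
}
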
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index bd9e634879e6..e387f0b2954a 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -537,8 +537,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->dtim_count = 1;
cur_conf->ibss_creator = bss_conf->ibss_creator;
- cur_conf->bmiss_timeout =
- ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
/*
* It looks like mac80211 may end up using beacon interval of zero in
@@ -549,6 +547,9 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
if (cur_conf->beacon_interval == 0)
cur_conf->beacon_interval = 100;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
/*
* We don't parse dtim period from mac80211 during the driver
* initialization as it breaks association with hidden-ssid
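
This beacon.c hunk is an ordering fix: bmiss_timeout was computed from beacon_interval before the zero-interval fallback ran, so a zero interval from mac80211 produced a zero beacon-miss timeout. A standalone sketch of the fixed ordering:

#include <stdio.h>

#define ATH_DEFAULT_BMISS_LIMIT 10

/* Derive dependent values only after sanitizing their inputs. */
static int bmiss_timeout(int beacon_interval)
{
	if (beacon_interval == 0)	/* fallback first, as the patch does */
		beacon_interval = 100;

	return ATH_DEFAULT_BMISS_LIMIT * beacon_interval;
}

int main(void)
{
	/* Before the fix, the equivalent computation returned 0 here. */
	printf("bmiss_timeout(0)   = %d\n", bmiss_timeout(0));
	printf("bmiss_timeout(100) = %d\n", bmiss_timeout(100));
	return 0;
}
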
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
new file mode 100644
index 000000000000..3b289f933405
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_hw *ah = file->private_data;
+ u32 len = 0, size = 6000;
+ char *buf;
+ size_t retval;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_modal_eeprom = {
+ .read = read_file_modal_eeprom,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+ debugfs_create_file("modal_eeprom", S_IRUSR, debugfs_phy, ah,
+ &fops_modal_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_modal_eeprom);
+
+static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_hw *ah = file->private_data;
+ u32 len = 0, size = 1500;
+ ssize_t retval = 0;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_base_eeprom = {
+ .read = read_file_base_eeprom,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah)
+{
+ debugfs_create_file("base_eeprom", S_IRUSR, debugfs_phy, ah,
+ &fops_base_eeprom);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_base_eeprom);
+
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs)
+{
+#define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++
+#define RX_CMN_STAT_INC(c) (rxstats->c++)
+
+ RX_CMN_STAT_INC(rx_pkts_all);
+ rxstats->rx_bytes_all += rs->rs_datalen;
+
+ if (rs->rs_status & ATH9K_RXERR_CRC)
+ RX_CMN_STAT_INC(crc_err);
+ if (rs->rs_status & ATH9K_RXERR_DECRYPT)
+ RX_CMN_STAT_INC(decrypt_crc_err);
+ if (rs->rs_status & ATH9K_RXERR_MIC)
+ RX_CMN_STAT_INC(mic_err);
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_CMN_STAT_INC(pre_delim_crc_err);
+ if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_CMN_STAT_INC(post_delim_crc_err);
+ if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_CMN_STAT_INC(decrypt_busy_err);
+
+ if (rs->rs_status & ATH9K_RXERR_PHY) {
+ RX_CMN_STAT_INC(phy_err);
+ if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
+ RX_PHY_ERR_INC(rs->rs_phyerr);
+ }
+
+#undef RX_CMN_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_stat_rx);
+
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define RXS_ERR(s, e) \
+ do { \
+ len += scnprintf(buf + len, size - len, \
+ "%18s : %10u\n", s, \
+ rxstats->e); \
+ } while (0)
+
+ struct ath_rx_stats *rxstats = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ RXS_ERR("PKTS-ALL", rx_pkts_all);
+ RXS_ERR("BYTES-ALL", rx_bytes_all);
+ RXS_ERR("BEACONS", rx_beacons);
+ RXS_ERR("FRAGS", rx_frags);
+ RXS_ERR("SPECTRAL", rx_spectral);
+
+ RXS_ERR("CRC ERR", crc_err);
+ RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
+ RXS_ERR("PHY ERR", phy_err);
+ RXS_ERR("MIC ERR", mic_err);
+ RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
+ RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
+ RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
+ RXS_ERR("LENGTH-ERR", rx_len_err);
+ RXS_ERR("OOM-ERR", rx_oom_err);
+ RXS_ERR("RATE-ERR", rx_rate_err);
+ RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef RXS_ERR
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+ debugfs_create_file("recv", S_IRUSR, debugfs_phy, rxstats,
+ &fops_recv);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_recv);
+
+static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+ rxstats->phy_err_stats[p]);
+
+ struct ath_rx_stats *rxstats = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1600;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+static const struct file_operations fops_phy_err = {
+ .read = read_file_phy_err,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats)
+{
+ debugfs_create_file("phy_err", S_IRUSR, debugfs_phy, rxstats,
+ &fops_phy_err);
+}
+EXPORT_SYMBOL(ath9k_cmn_debug_phy_err);
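
The new common-debug.c moves the recv/phy_err/eeprom debugfs files into ath9k_common so ath9k and ath9k_htc stop carrying duplicate copies; each driver now passes its own debugfs dentry and stats struct to the exported helpers (see the debug.c and htc_drv_debug.c hunks further down). A userspace analogue of that ownership split, with simplified types:

#include <stdint.h>
#include <stdio.h>

/* Shared code formats statistics it does not own; each "driver"
 * hands in its own struct, mirroring ath9k_cmn_debug_recv().
 */
struct ath_rx_stats {
	uint32_t rx_pkts_all;
	uint32_t crc_err;
};

static void cmn_debug_recv(const char *phy, const struct ath_rx_stats *rx)
{
	printf("%s: PKTS-ALL %u, CRC ERR %u\n", phy,
	       (unsigned int)rx->rx_pkts_all, (unsigned int)rx->crc_err);
}

int main(void)
{
	struct ath_rx_stats ath9k_stats = { .rx_pkts_all = 42, .crc_err = 1 };
	struct ath_rx_stats htc_stats = { .rx_pkts_all = 7 };

	cmn_debug_recv("ath9k-phy0", &ath9k_stats);	/* debug.c caller */
	cmn_debug_recv("htc-phy0", &htc_stats);	/* htc_drv_debug.c caller */
	return 0;
}
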
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
new file mode 100644
index 000000000000..7c9788490f7f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @rx_pkts_all: No. of total frames received, including ones that
+ *	may have had errors.
+ * @rx_bytes_all: No. of total bytes received, including ones that
+ *	may have had errors.
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ *	decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ *	encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ * @rx_len_err: No. of frames discarded due to bad length.
+ * @rx_oom_err: No. of frames dropped due to OOM issues.
+ * @rx_rate_err: No. of frames dropped due to rate errors.
+ * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
+ * @rx_beacons: No. of beacons received.
+ * @rx_frags: No. of rx-fragments received.
+ * @rx_spectral: No. of spectral packets received.
+ */
+struct ath_rx_stats {
+ u32 rx_pkts_all;
+ u32 rx_bytes_all;
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+ u32 rx_len_err;
+ u32 rx_oom_err;
+ u32 rx_rate_err;
+ u32 rx_too_many_frags_err;
+ u32 rx_beacons;
+ u32 rx_frags;
+ u32 rx_spectral;
+};
+
+void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah);
+void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+ struct ath_hw *ah);
+void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+ struct ath_rx_status *rs);
+void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats);
+void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+ struct ath_rx_stats *rxstats);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ca38116838f0..ffc454b18637 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,6 +23,7 @@
#include "common-init.h"
#include "common-beacon.h"
+#include "common-debug.h"
/* Common header for Atheros 802.11n base driver cores */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 780ff1bee6f6..6cc42be48d4e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -948,151 +948,11 @@ static const struct file_operations fops_reset = {
.llseek = default_llseek,
};
-static ssize_t read_file_recv(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define RXS_ERR(s, e) \
- do { \
- len += scnprintf(buf + len, size - len, \
- "%18s : %10u\n", s, \
- sc->debug.stats.rxstats.e);\
- } while (0)
-
- struct ath_softc *sc = file->private_data;
- char *buf;
- unsigned int len = 0, size = 1600;
- ssize_t retval = 0;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- RXS_ERR("PKTS-ALL", rx_pkts_all);
- RXS_ERR("BYTES-ALL", rx_bytes_all);
- RXS_ERR("BEACONS", rx_beacons);
- RXS_ERR("FRAGS", rx_frags);
- RXS_ERR("SPECTRAL", rx_spectral);
-
- RXS_ERR("CRC ERR", crc_err);
- RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
- RXS_ERR("PHY ERR", phy_err);
- RXS_ERR("MIC ERR", mic_err);
- RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
- RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
- RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
- RXS_ERR("LENGTH-ERR", rx_len_err);
- RXS_ERR("OOM-ERR", rx_oom_err);
- RXS_ERR("RATE-ERR", rx_rate_err);
- RXS_ERR("TOO-MANY-FRAGS", rx_too_many_frags_err);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef RXS_ERR
-}
-
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
{
-#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
-
- RX_STAT_INC(rx_pkts_all);
- sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
-
- if (rs->rs_status & ATH9K_RXERR_CRC)
- RX_STAT_INC(crc_err);
- if (rs->rs_status & ATH9K_RXERR_DECRYPT)
- RX_STAT_INC(decrypt_crc_err);
- if (rs->rs_status & ATH9K_RXERR_MIC)
- RX_STAT_INC(mic_err);
- if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
- RX_STAT_INC(pre_delim_crc_err);
- if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
- RX_STAT_INC(post_delim_crc_err);
- if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
- RX_STAT_INC(decrypt_busy_err);
-
- if (rs->rs_status & ATH9K_RXERR_PHY) {
- RX_STAT_INC(phy_err);
- if (rs->rs_phyerr < ATH9K_PHYERR_MAX)
- RX_PHY_ERR_INC(rs->rs_phyerr);
- }
-
-#undef RX_PHY_ERR_INC
+ ath9k_cmn_debug_stat_rx(&sc->debug.stats.rxstats, rs);
}
-static const struct file_operations fops_recv = {
- .read = read_file_recv,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_phy_err(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PHY_ERR(s, p) \
- len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
- sc->debug.stats.rxstats.phy_err_stats[p]);
-
- struct ath_softc *sc = file->private_data;
- char *buf;
- unsigned int len = 0, size = 1600;
- ssize_t retval = 0;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY ERR", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE ERR", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH ERR", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH ERR", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PHY_ERR
-}
-
-static const struct file_operations fops_phy_err = {
- .read = read_file_phy_err,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1268,62 +1128,6 @@ static const struct file_operations fops_dump_nfcal = {
.llseek = default_llseek,
};
-static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- u32 len = 0, size = 1500;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len = ah->eep_ops->dump_eeprom(ah, true, buf, len, size);
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_base_eeprom = {
- .read = read_file_base_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- u32 len = 0, size = 6000;
- char *buf;
- size_t retval;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len = ah->eep_ops->dump_eeprom(ah, false, buf, len, size);
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_modal_eeprom = {
- .read = read_file_modal_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1524,10 +1328,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_misc);
debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_reset);
- debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_recv);
- debugfs_create_file("phy_err", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_phy_err);
+
+ ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+ ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
+
debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
&ah->rxchainmask);
debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
@@ -1547,10 +1351,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_regdump);
debugfs_create_file("dump_nfcal", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dump_nfcal);
- debugfs_create_file("base_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_base_eeprom);
- debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
- &fops_modal_eeprom);
+
+ ath9k_cmn_debug_base_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+ ath9k_cmn_debug_modal_eeprom(sc->debug.debugfs_phy, sc->sc_ah);
+
debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 559a68c2709c..53ae15bd0c9d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -221,50 +221,6 @@ struct ath_rx_rate_stats {
} cck_stats[4];
};
-/**
- * struct ath_rx_stats - RX Statistics
- * @rx_pkts_all: No. of total frames received, including ones that
- may have had errors.
- * @rx_bytes_all: No. of total bytes received, including ones that
- may have had errors.
- * @crc_err: No. of frames with incorrect CRC value
- * @decrypt_crc_err: No. of frames whose CRC check failed after
- decryption process completed
- * @phy_err: No. of frames whose reception failed because the PHY
- encountered an error
- * @mic_err: No. of frames with incorrect TKIP MIC verification failure
- * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
- * @post_delim_crc_err: Post-Frame delimiter CRC error detections
- * @decrypt_busy_err: Decryption interruptions counter
- * @phy_err_stats: Individual PHY error statistics
- * @rx_len_err: No. of frames discarded due to bad length.
- * @rx_oom_err: No. of frames dropped due to OOM issues.
- * @rx_rate_err: No. of frames dropped due to rate errors.
- * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
- * @rx_beacons: No. of beacons received.
- * @rx_frags: No. of rx-fragements received.
- * @rx_spectral: No of spectral packets received.
- */
-struct ath_rx_stats {
- u32 rx_pkts_all;
- u32 rx_bytes_all;
- u32 crc_err;
- u32 decrypt_crc_err;
- u32 phy_err;
- u32 mic_err;
- u32 pre_delim_crc_err;
- u32 post_delim_crc_err;
- u32 decrypt_busy_err;
- u32 phy_err_stats[ATH9K_PHYERR_MAX];
- u32 rx_len_err;
- u32 rx_oom_err;
- u32 rx_rate_err;
- u32 rx_too_many_frags_err;
- u32 rx_beacons;
- u32 rx_frags;
- u32 rx_spectral;
-};
-
#define ANT_MAIN 0
#define ANT_ALT 1
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 857bb28b3894..726271c7c330 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -178,12 +178,12 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
pe.ts = mactime;
if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
struct dfs_pattern_detector *pd = sc->dfs_detector;
- static u64 last_ts;
ath_dbg(common, DFS,
"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
- pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
- last_ts = pe.ts;
+ pe.freq, pe.ts, pe.width, pe.rssi,
+ pe.ts - sc->dfs_prev_pulse_ts);
+ sc->dfs_prev_pulse_ts = pe.ts;
DFS_STAT_INC(sc, pulses_processed);
if (pd != NULL && pd->add_pulse(pd, &pe)) {
DFS_STAT_INC(sc, radar_detected);
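
The dfs.c change is more than a cleanup: a function-scope "static u64 last_ts" is a single variable shared by every ath9k instance in the system, so interleaved pulses from two DFS-capable devices corrupted delta_ts. Storing the previous timestamp in ath_softc makes it per-device. A standalone sketch of the pitfall:

#include <stdint.h>
#include <stdio.h>

struct dev { uint64_t prev_ts; };	/* per-instance, like ath_softc */

/* Buggy: one shared timestamp for all callers. */
static uint64_t delta_shared(uint64_t ts)
{
	static uint64_t last_ts;	/* shared across every device */
	uint64_t d = ts - last_ts;

	last_ts = ts;
	return d;
}

/* Fixed: the caller's own state is used. */
static uint64_t delta_per_dev(struct dev *dev, uint64_t ts)
{
	uint64_t d = ts - dev->prev_ts;

	dev->prev_ts = ts;
	return d;
}

int main(void)
{
	struct dev a = { 0 }, b = { 0 };
	uint64_t da, db;

	da = delta_shared(100);		/* device A pulse */
	db = delta_shared(5000);	/* device B pulse sees A's timestamp */
	printf("shared:  a=%llu b=%llu (b is wrong)\n",
	       (unsigned long long)da, (unsigned long long)db);

	da = delta_per_dev(&a, 100);
	db = delta_per_dev(&b, 5000);
	printf("per-dev: a=%llu b=%llu\n",
	       (unsigned long long)da, (unsigned long long)db);
	return 0;
}
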
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index dab1f0cab993..09a5d72f3ff5 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -325,14 +325,14 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
-#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
-#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
+#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++)
+#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a)
#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++
#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
- struct ath_htc_rx_status *rxs);
+ struct ath_rx_status *rs);
struct ath_tx_stats {
u32 buf_queued;
@@ -345,25 +345,18 @@ struct ath_tx_stats {
u32 queue_stats[IEEE80211_NUM_ACS];
};
-struct ath_rx_stats {
+struct ath_skbrx_stats {
u32 skb_allocated;
u32 skb_completed;
u32 skb_completed_bytes;
u32 skb_dropped;
- u32 err_crc;
- u32 err_decrypt_crc;
- u32 err_mic;
- u32 err_pre_delim;
- u32 err_post_delim;
- u32 err_decrypt_busy;
- u32 err_phy;
- u32 err_phy_stats[ATH9K_PHYERR_MAX];
};
struct ath9k_debug {
struct dentry *debugfs_phy;
struct ath_tx_stats tx_stats;
struct ath_rx_stats rx_stats;
+ struct ath_skbrx_stats skbrx_stats;
};
void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
@@ -385,7 +378,7 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
#define TX_QSTAT_INC(c) do { } while (0)
static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
- struct ath_htc_rx_status *rxs)
+ struct ath_rx_status *rs)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index fb071ee4fcfb..8b529e4b8ac4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -243,39 +243,14 @@ static const struct file_operations fops_xmit = {
};
void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
- struct ath_htc_rx_status *rxs)
+ struct ath_rx_status *rs)
{
-#define RX_PHY_ERR_INC(c) priv->debug.rx_stats.err_phy_stats[c]++
-
- if (rxs->rs_status & ATH9K_RXERR_CRC)
- priv->debug.rx_stats.err_crc++;
- if (rxs->rs_status & ATH9K_RXERR_DECRYPT)
- priv->debug.rx_stats.err_decrypt_crc++;
- if (rxs->rs_status & ATH9K_RXERR_MIC)
- priv->debug.rx_stats.err_mic++;
- if (rxs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
- priv->debug.rx_stats.err_pre_delim++;
- if (rxs->rs_status & ATH9K_RX_DELIM_CRC_POST)
- priv->debug.rx_stats.err_post_delim++;
- if (rxs->rs_status & ATH9K_RX_DECRYPT_BUSY)
- priv->debug.rx_stats.err_decrypt_busy++;
-
- if (rxs->rs_status & ATH9K_RXERR_PHY) {
- priv->debug.rx_stats.err_phy++;
- if (rxs->rs_phyerr < ATH9K_PHYERR_MAX)
- RX_PHY_ERR_INC(rxs->rs_phyerr);
- }
-
-#undef RX_PHY_ERR_INC
+ ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs);
}
-static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
-#define PHY_ERR(s, p) \
- len += scnprintf(buf + len, size - len, "%20s : %10u\n", s, \
- priv->debug.rx_stats.err_phy_stats[p]);
-
struct ath9k_htc_priv *priv = file->private_data;
char *buf;
unsigned int len = 0, size = 1500;
@@ -287,63 +262,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs allocated",
- priv->debug.rx_stats.skb_allocated);
+ priv->debug.skbrx_stats.skb_allocated);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs completed",
- priv->debug.rx_stats.skb_completed);
+ priv->debug.skbrx_stats.skb_completed);
len += scnprintf(buf + len, size - len,
"%20s : %10u\n", "SKBs Dropped",
- priv->debug.rx_stats.skb_dropped);
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "CRC ERR",
- priv->debug.rx_stats.err_crc);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT CRC ERR",
- priv->debug.rx_stats.err_decrypt_crc);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "MIC ERR",
- priv->debug.rx_stats.err_mic);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "PRE-DELIM CRC ERR",
- priv->debug.rx_stats.err_pre_delim);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "POST-DELIM CRC ERR",
- priv->debug.rx_stats.err_post_delim);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "DECRYPT BUSY ERR",
- priv->debug.rx_stats.err_decrypt_busy);
- len += scnprintf(buf + len, size - len,
- "%20s : %10u\n", "TOTAL PHY ERR",
- priv->debug.rx_stats.err_phy);
-
-
- PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
- PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
- PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
- PHY_ERR("RATE", ATH9K_PHYERR_RATE);
- PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
- PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
- PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
- PHY_ERR("TOR", ATH9K_PHYERR_TOR);
- PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
- PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
- PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
- PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
- PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
- PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
- PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
- PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
- PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
- PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
- PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
- PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
- PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
- PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
- PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
- PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
- PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
- PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+ priv->debug.skbrx_stats.skb_dropped);
if (len > size)
len = size;
@@ -352,12 +277,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
kfree(buf);
return retval;
-
-#undef PHY_ERR
}
-static const struct file_operations fops_recv = {
- .read = read_file_recv,
+static const struct file_operations fops_skb_rx = {
+ .read = read_file_skb_rx,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@@ -486,423 +409,6 @@ static const struct file_operations fops_debug = {
.llseek = default_llseek,
};
-static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath9k_htc_priv *priv = file->private_data;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct base_eep_header *pBase = NULL;
- unsigned int len = 0, size = 1500;
- ssize_t retval = 0;
- char *buf;
-
- pBase = ath9k_htc_get_eeprom_base(priv);
-
- if (pBase == NULL) {
- ath_err(common, "Unknown EEPROM type\n");
- return 0;
- }
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Major Version",
- pBase->version >> 12);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Minor Version",
- pBase->version & 0xFFF);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Checksum",
- pBase->checksum);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "Length",
- pBase->length);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain1",
- pBase->regDmn[0]);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n", "RegDomain2",
- pBase->regDmn[1]);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Mask", pBase->txMask);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "RX Mask", pBase->rxMask);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 5GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Allow 2GHz",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 2GHz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT20",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Disable 5Ghz HT40",
- !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Big Endian",
- !!(pBase->eepMisc & 0x01));
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Major Ver",
- (pBase->binBuildNumber >> 24) & 0xFF);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Minor Ver",
- (pBase->binBuildNumber >> 16) & 0xFF);
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "Cal Bin Build",
- (pBase->binBuildNumber >> 8) & 0xFF);
-
- /*
- * UB91 specific data.
- */
- if (AR_SREV_9271(priv->ah)) {
- struct base_eep_header_4k *pBase4k =
- &priv->ah->eeprom.map4k.baseEepHeader;
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "TX Gain type",
- pBase4k->txGainType);
- }
-
- /*
- * UB95 specific data.
- */
- if (priv->ah->hw_version.usbdev == AR9287_USB) {
- struct base_eep_ar9287_header *pBase9287 =
- &priv->ah->eeprom.map9287.baseEepHeader;
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10ddB\n",
- "Power Table Offset",
- pBase9287->pwrTableOffset);
-
- len += scnprintf(buf + len, size - len,
- "%20s : %10d\n",
- "OpenLoop Power Ctrl",
- pBase9287->openLoopPwrCntl);
- }
-
- len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
- pBase->macAddr);
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_base_eeprom = {
- .read = read_file_base_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t read_4k_modal_eeprom(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val) \
- do { \
- len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
- _s, (_val)); \
- } while (0)
-
- struct ath9k_htc_priv *priv = file->private_data;
- struct modal_eep_4k_header *pModal = &priv->ah->eeprom.map4k.modalHeader;
- unsigned int len = 0, size = 2048;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
- PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
- PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
- PR_EEP("Switch Settle", pModal->switchSettling);
- PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
- PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
- PR_EEP("ADC Desired size", pModal->adcDesiredSize);
- PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
- PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
- PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
- PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
- PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
- PR_EEP("CCA Threshold)", pModal->thresh62);
- PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
- PR_EEP("xpdGain", pModal->xpdGain);
- PR_EEP("External PD", pModal->xpd);
- PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
- PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
- PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
- PR_EEP("O/D Bias Version", pModal->version);
- PR_EEP("CCK OutputBias", pModal->ob_0);
- PR_EEP("BPSK OutputBias", pModal->ob_1);
- PR_EEP("QPSK OutputBias", pModal->ob_2);
- PR_EEP("16QAM OutputBias", pModal->ob_3);
- PR_EEP("64QAM OutputBias", pModal->ob_4);
- PR_EEP("CCK Driver1_Bias", pModal->db1_0);
- PR_EEP("BPSK Driver1_Bias", pModal->db1_1);
- PR_EEP("QPSK Driver1_Bias", pModal->db1_2);
- PR_EEP("16QAM Driver1_Bias", pModal->db1_3);
- PR_EEP("64QAM Driver1_Bias", pModal->db1_4);
- PR_EEP("CCK Driver2_Bias", pModal->db2_0);
- PR_EEP("BPSK Driver2_Bias", pModal->db2_1);
- PR_EEP("QPSK Driver2_Bias", pModal->db2_2);
- PR_EEP("16QAM Driver2_Bias", pModal->db2_3);
- PR_EEP("64QAM Driver2_Bias", pModal->db2_4);
- PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
- PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
- PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
- PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
- PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
- PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
- PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
- PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
- PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
- PR_EEP("Ant. Diversity ctl1", pModal->antdiv_ctl1);
- PR_EEP("Ant. Diversity ctl2", pModal->antdiv_ctl2);
- PR_EEP("TX Diversity", pModal->tx_diversity);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_def_modal_eeprom(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val) \
- do { \
- if (pBase->opCapFlags & AR5416_OPFLAGS_11G) { \
- pModal = &priv->ah->eeprom.def.modalHeader[1]; \
- len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
- _s, (_val), "|"); \
- } \
- if (pBase->opCapFlags & AR5416_OPFLAGS_11A) { \
- pModal = &priv->ah->eeprom.def.modalHeader[0]; \
- len += scnprintf(buf + len, size - len, "%9d\n",\
- (_val)); \
- } \
- } while (0)
-
- struct ath9k_htc_priv *priv = file->private_data;
- struct base_eep_header *pBase = &priv->ah->eeprom.def.baseEepHeader;
- struct modal_eep_header *pModal = NULL;
- unsigned int len = 0, size = 3500;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len,
- "%31s %15s\n", "2G", "5G");
- len += scnprintf(buf + len, size - len,
- "%32s %16s\n", "====", "====\n");
-
- PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
- PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
- PR_EEP("Chain2 Ant. Control", pModal->antCtrlChain[2]);
- PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
- PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
- PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
- PR_EEP("Chain2 Ant. Gain", pModal->antennaGainCh[2]);
- PR_EEP("Switch Settle", pModal->switchSettling);
- PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
- PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
- PR_EEP("Chain2 TxRxAtten", pModal->txRxAttenCh[2]);
- PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
- PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
- PR_EEP("Chain2 RxTxMargin", pModal->rxTxMarginCh[2]);
- PR_EEP("ADC Desired size", pModal->adcDesiredSize);
- PR_EEP("PGA Desired size", pModal->pgaDesiredSize);
- PR_EEP("Chain0 xlna Gain", pModal->xlnaGainCh[0]);
- PR_EEP("Chain1 xlna Gain", pModal->xlnaGainCh[1]);
- PR_EEP("Chain2 xlna Gain", pModal->xlnaGainCh[2]);
- PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
- PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
- PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
- PR_EEP("CCA Threshold)", pModal->thresh62);
- PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
- PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
- PR_EEP("Chain2 NF Threshold", pModal->noiseFloorThreshCh[2]);
- PR_EEP("xpdGain", pModal->xpdGain);
- PR_EEP("External PD", pModal->xpd);
- PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
- PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
- PR_EEP("Chain2 I Coefficient", pModal->iqCalICh[2]);
- PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
- PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
- PR_EEP("Chain2 Q Coefficient", pModal->iqCalQCh[2]);
- PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
- PR_EEP("Chain0 OutputBias", pModal->ob);
- PR_EEP("Chain0 DriverBias", pModal->db);
- PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
- PR_EEP("2chain pwr decrease", pModal->pwrDecreaseFor2Chain);
- PR_EEP("3chain pwr decrease", pModal->pwrDecreaseFor3Chain);
- PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
- PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
- PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
- PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
- PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
- PR_EEP("Chain2 bswAtten", pModal->bswAtten[2]);
- PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
- PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
- PR_EEP("Chain2 bswMargin", pModal->bswMargin[2]);
- PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
- PR_EEP("Chain0 xatten2Db", pModal->xatten2Db[0]);
- PR_EEP("Chain1 xatten2Db", pModal->xatten2Db[1]);
- PR_EEP("Chain2 xatten2Db", pModal->xatten2Db[2]);
- PR_EEP("Chain0 xatten2Margin", pModal->xatten2Margin[0]);
- PR_EEP("Chain1 xatten2Margin", pModal->xatten2Margin[1]);
- PR_EEP("Chain2 xatten2Margin", pModal->xatten2Margin[2]);
- PR_EEP("Chain1 OutputBias", pModal->ob_ch1);
- PR_EEP("Chain1 DriverBias", pModal->db_ch1);
- PR_EEP("LNA Control", pModal->lna_ctl);
- PR_EEP("XPA Bias Freq0", pModal->xpaBiasLvlFreq[0]);
- PR_EEP("XPA Bias Freq1", pModal->xpaBiasLvlFreq[1]);
- PR_EEP("XPA Bias Freq2", pModal->xpaBiasLvlFreq[2]);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_9287_modal_eeprom(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
-#define PR_EEP(_s, _val) \
- do { \
- len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
- _s, (_val)); \
- } while (0)
-
- struct ath9k_htc_priv *priv = file->private_data;
- struct modal_eep_ar9287_header *pModal = &priv->ah->eeprom.map9287.modalHeader;
- unsigned int len = 0, size = 3000;
- ssize_t retval = 0;
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
- PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
- PR_EEP("Ant. Common Control", pModal->antCtrlCommon);
- PR_EEP("Chain0 Ant. Gain", pModal->antennaGainCh[0]);
- PR_EEP("Chain1 Ant. Gain", pModal->antennaGainCh[1]);
- PR_EEP("Switch Settle", pModal->switchSettling);
- PR_EEP("Chain0 TxRxAtten", pModal->txRxAttenCh[0]);
- PR_EEP("Chain1 TxRxAtten", pModal->txRxAttenCh[1]);
- PR_EEP("Chain0 RxTxMargin", pModal->rxTxMarginCh[0]);
- PR_EEP("Chain1 RxTxMargin", pModal->rxTxMarginCh[1]);
- PR_EEP("ADC Desired size", pModal->adcDesiredSize);
- PR_EEP("txEndToXpaOff", pModal->txEndToXpaOff);
- PR_EEP("txEndToRxOn", pModal->txEndToRxOn);
- PR_EEP("txFrameToXpaOn", pModal->txFrameToXpaOn);
- PR_EEP("CCA Threshold)", pModal->thresh62);
- PR_EEP("Chain0 NF Threshold", pModal->noiseFloorThreshCh[0]);
- PR_EEP("Chain1 NF Threshold", pModal->noiseFloorThreshCh[1]);
- PR_EEP("xpdGain", pModal->xpdGain);
- PR_EEP("External PD", pModal->xpd);
- PR_EEP("Chain0 I Coefficient", pModal->iqCalICh[0]);
- PR_EEP("Chain1 I Coefficient", pModal->iqCalICh[1]);
- PR_EEP("Chain0 Q Coefficient", pModal->iqCalQCh[0]);
- PR_EEP("Chain1 Q Coefficient", pModal->iqCalQCh[1]);
- PR_EEP("pdGainOverlap", pModal->pdGainOverlap);
- PR_EEP("xPA Bias Level", pModal->xpaBiasLvl);
- PR_EEP("txFrameToDataStart", pModal->txFrameToDataStart);
- PR_EEP("txFrameToPaOn", pModal->txFrameToPaOn);
- PR_EEP("HT40 Power Inc.", pModal->ht40PowerIncForPdadc);
- PR_EEP("Chain0 bswAtten", pModal->bswAtten[0]);
- PR_EEP("Chain1 bswAtten", pModal->bswAtten[1]);
- PR_EEP("Chain0 bswMargin", pModal->bswMargin[0]);
- PR_EEP("Chain1 bswMargin", pModal->bswMargin[1]);
- PR_EEP("HT40 Switch Settle", pModal->swSettleHt40);
- PR_EEP("AR92x7 Version", pModal->version);
- PR_EEP("DriverBias1", pModal->db1);
- PR_EEP("DriverBias2", pModal->db1);
- PR_EEP("CCK OutputBias", pModal->ob_cck);
- PR_EEP("PSK OutputBias", pModal->ob_psk);
- PR_EEP("QAM OutputBias", pModal->ob_qam);
- PR_EEP("PAL_OFF OutputBias", pModal->ob_pal_off);
-
- if (len > size)
- len = size;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-
-#undef PR_EEP
-}
-
-static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath9k_htc_priv *priv = file->private_data;
-
- if (AR_SREV_9271(priv->ah))
- return read_4k_modal_eeprom(file, user_buf, count, ppos);
- else if (priv->ah->hw_version.usbdev == AR9280_USB)
- return read_def_modal_eeprom(file, user_buf, count, ppos);
- else if (priv->ah->hw_version.usbdev == AR9287_USB)
- return read_9287_modal_eeprom(file, user_buf, count, ppos);
-
- return 0;
-}
-
-static const struct file_operations fops_modal_eeprom = {
- .read = read_file_modal_eeprom,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-
/* Ethtool support for get-stats */
#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -947,6 +453,8 @@ int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
#define STXBASE priv->debug.tx_stats
#define SRXBASE priv->debug.rx_stats
+#define SKBTXBASE priv->debug.tx_stats
+#define SKBRXBASE priv->debug.skbrx_stats
#define ASTXQ(a) \
data[i++] = STXBASE.a[IEEE80211_AC_BE]; \
data[i++] = STXBASE.a[IEEE80211_AC_BK]; \
@@ -960,24 +468,24 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
int i = 0;
- data[i++] = STXBASE.skb_success;
- data[i++] = STXBASE.skb_success_bytes;
- data[i++] = SRXBASE.skb_completed;
- data[i++] = SRXBASE.skb_completed_bytes;
+ data[i++] = SKBTXBASE.skb_success;
+ data[i++] = SKBTXBASE.skb_success_bytes;
+ data[i++] = SKBRXBASE.skb_completed;
+ data[i++] = SKBRXBASE.skb_completed_bytes;
ASTXQ(queue_stats);
- data[i++] = SRXBASE.err_crc;
- data[i++] = SRXBASE.err_decrypt_crc;
- data[i++] = SRXBASE.err_phy;
- data[i++] = SRXBASE.err_mic;
- data[i++] = SRXBASE.err_pre_delim;
- data[i++] = SRXBASE.err_post_delim;
- data[i++] = SRXBASE.err_decrypt_busy;
+ data[i++] = SRXBASE.crc_err;
+ data[i++] = SRXBASE.decrypt_crc_err;
+ data[i++] = SRXBASE.phy_err;
+ data[i++] = SRXBASE.mic_err;
+ data[i++] = SRXBASE.pre_delim_crc_err;
+ data[i++] = SRXBASE.post_delim_crc_err;
+ data[i++] = SRXBASE.decrypt_busy_err;
- data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
- data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
- data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING];
+ data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING];
WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
}
@@ -1001,18 +509,21 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv, &fops_tgt_rx_stats);
debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_xmit);
- debugfs_create_file("recv", S_IRUSR, priv->debug.debugfs_phy,
- priv, &fops_recv);
+ debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy,
+ priv, &fops_skb_rx);
+
+ ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+ ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats);
+
debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_slot);
debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy,
priv, &fops_queue);
debugfs_create_file("debug", S_IRUSR | S_IWUSR, priv->debug.debugfs_phy,
priv, &fops_debug);
- debugfs_create_file("base_eeprom", S_IRUSR, priv->debug.debugfs_phy,
- priv, &fops_base_eeprom);
- debugfs_create_file("modal_eeprom", S_IRUSR, priv->debug.debugfs_phy,
- priv, &fops_modal_eeprom);
+
+ ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah);
+ ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah);
return 0;
}
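
The hunks above drop the driver-local base/modal EEPROM dumps and the old recv stats file in favor of the shared ath9k_cmn_debug_*() helpers. For orientation, a minimal sketch of the debugfs dump pattern the removed code followed: format into a temporary buffer, copy it out with simple_read_from_buffer(), free it. The function name, buffer size and field below are illustrative placeholders, not the actual common helpers.

    static ssize_t read_file_example(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
    {
            struct ath_hw *ah = file->private_data;
            unsigned int len = 0, size = 1024; /* placeholder buffer size */
            ssize_t retval;
            char *buf;

            buf = kzalloc(size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            len += scnprintf(buf + len, size - len, "%20s : %10d\n",
                             "Major Version", ah->hw_version.macVersion);

            retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
            kfree(buf);

            return retval;
    }
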
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 289f3d8924b5..bb86eb2ffc95 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -996,8 +996,6 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
goto rx_next;
}
- ath9k_htc_err_stat_rx(priv, rxstatus);
-
/* Get the RX status information */
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
@@ -1005,6 +1003,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
/* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
* After this, we can drop this part of skb. */
rx_status_htc_to_ath(&rx_stats, rxstatus);
+ ath9k_htc_err_stat_rx(priv, &rx_stats);
rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c8a9dfab1fee..2a8ed8375ec0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -26,7 +26,6 @@
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"
-#include "debug.h"
#include "ath9k.h"
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
@@ -246,6 +245,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
return;
case AR9300_DEVID_AR953X:
ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+ if (ah->get_mac_revision)
+ ah->hw_version.macRev = ah->get_mac_revision();
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 36ae6490e554..0246b990fe87 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -61,6 +61,10 @@ static int ath9k_ps_enable;
module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+static int ath9k_use_chanctx;
+module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
+MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
+
bool is_ath9k_unloaded;
#ifdef CONFIG_MAC80211_LEDS
@@ -508,7 +512,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
sc->tx99_power = MAX_RATE_POWER + 1;
init_waitqueue_head(&sc->tx_wait);
- if (!pdata) {
+ if (!pdata || pdata->use_eeprom) {
ah->ah_flags |= AH_USE_EEPROM;
sc->sc_ah->led_pin = -1;
} else {
@@ -589,6 +593,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_btcoex;
+ sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
+ NULL, sc, AR_FIRST_NDP_TIMER);
+
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
ath_fill_led_pin(sc);
@@ -643,17 +650,20 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
}
static const struct ieee80211_iface_limit if_limits[] = {
- { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_WDS) },
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
{ .max = 8, .types =
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
- BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_AP) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) },
};
+static const struct ieee80211_iface_limit wds_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
+};
+
static const struct ieee80211_iface_limit if_dfs_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
@@ -670,6 +680,13 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
},
+ {
+ .limits = wds_limits,
+ .n_limits = ARRAY_SIZE(wds_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
#ifdef CONFIG_ATH9K_DFS_CERTIFIED
{
.limits = if_dfs_limits,
@@ -711,19 +728,23 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ hw->wiphy->features |= (NL80211_FEATURE_ACTIVE_MONITOR |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE);
if (!config_enabled(CONFIG_ATH9K_TX99)) {
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
hw->wiphy->iface_combinations = if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (!ath9k_use_chanctx) {
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS);
+ } else
+ hw->wiphy->n_iface_combinations = 1;
}
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -855,6 +876,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
{
int i = 0;
+ if (sc->p2p_ps_timer)
+ ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
+
ath9k_deinit_btcoex(sc);
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 51ce36f108f9..275205ab5f15 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -958,3 +958,25 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
+
+#define ATH9K_HW_MAX_DCU 10
+#define ATH9K_HW_SLICE_PER_DCU 16
+#define ATH9K_HW_BIT_IN_SLICE 16
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
+{
+ int dcu_idx;
+ u32 filter;
+
+ for (dcu_idx = 0; dcu_idx < ATH9K_HW_MAX_DCU; dcu_idx++) {
+ filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
+ filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
+ filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
+ AR_D_TXBLK_WRITE_SLICE);
+ filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
+ ath_dbg(ath9k_hw_common(ah), PS,
+ "DCU%d staid %d set %d txfilter %08x\n",
+ dcu_idx, destidx, set, filter);
+ REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
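
A worked example of the index math in ath9k_hw_set_tx_filter(): with ATH9K_HW_SLICE_PER_DCU slices per DCU and ATH9K_HW_BIT_IN_SLICE bits per slice, the destination (key-cache) index splits into a slice selector and a bit inside that slice, and the same word is written for every DCU. A standalone sketch with a hypothetical index; the register field layout itself is not modeled here.

    #include <stdio.h>

    #define ATH9K_HW_SLICE_PER_DCU 16
    #define ATH9K_HW_BIT_IN_SLICE  16

    int main(void)
    {
            unsigned int destidx = 37; /* hypothetical key-cache index */

            printf("keyidx %u -> slice %u, bit %u\n", destidx,
                   destidx / ATH9K_HW_SLICE_PER_DCU,   /* -> 2 */
                   destidx % ATH9K_HW_BIT_IN_SLICE);   /* -> 5 */
            return 0;
    }
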
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 89df634e81f9..da7686757535 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -729,6 +729,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
+void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set);
/* Interrupt Handling */
bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index d69853b848ce..62ac95d6bb9d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
sc->gtt_cnt = 0;
ieee80211_wake_queues(sc->hw);
+ ath9k_p2p_ps_timer(sc);
+
return true;
}
@@ -419,6 +421,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
an->sc = sc;
an->sta = sta;
an->vif = vif;
+ memset(&an->key_idx, 0, sizeof(an->key_idx));
ath_tx_node_init(sc, an);
}
@@ -1119,6 +1122,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
+ avp->vif = vif;
+
an->sc = sc;
an->sta = NULL;
an->vif = vif;
@@ -1163,6 +1168,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
return 0;
}
+static void
+ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ s32 tsf, target_tsf;
+
+ if (!avp || !avp->noa.has_next_tsf)
+ return;
+
+ ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+
+ target_tsf = avp->noa.next_tsf;
+ if (!avp->noa.absent)
+ target_tsf -= ATH_P2P_PS_STOP_TIME;
+
+ if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
+ target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
+
+ ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
+}
+
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1174,6 +1202,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ if (avp == sc->p2p_ps_vif) {
+ sc->p2p_ps_vif = NULL;
+ ath9k_update_p2p_ps_timer(sc, NULL);
+ }
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
sc->nvifs--;
sc->tx99_vif = NULL;
@@ -1427,8 +1462,10 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
return 0;
key = ath_key_config(common, vif, sta, &ps_key);
- if (key > 0)
+ if (key > 0) {
an->ps_key = key;
+ an->key_idx[0] = key;
+ }
return 0;
}
@@ -1446,6 +1483,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
ath_key_delete(common, &ps_key);
an->ps_key = 0;
+ an->key_idx[0] = 0;
}
static int ath9k_sta_remove(struct ieee80211_hw *hw,
@@ -1460,6 +1498,19 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
return 0;
}
+static void ath9k_sta_set_tx_filter(struct ath_hw *ah,
+ struct ath_node *an,
+ bool set)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (!an->key_idx[i])
+ continue;
+ ath9k_hw_set_tx_filter(ah, an->key_idx[i], set);
+ }
+}
+
static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
@@ -1472,8 +1523,10 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
case STA_NOTIFY_SLEEP:
an->sleeping = true;
ath_tx_aggr_sleep(sta, sc, an);
+ ath9k_sta_set_tx_filter(sc->sc_ah, an, true);
break;
case STA_NOTIFY_AWAKE:
+ ath9k_sta_set_tx_filter(sc->sc_ah, an, false);
an->sleeping = false;
ath_tx_aggr_wakeup(sc, an);
break;
@@ -1529,7 +1582,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int ret = 0;
+ struct ath_node *an = NULL;
+ int ret = 0, i;
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
@@ -1551,13 +1605,16 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath_dbg(common, CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
+ if (sta)
+ an = (struct ath_node *)sta->drv_priv;
switch (cmd) {
case SET_KEY:
if (sta)
ath9k_del_ps_key(sc, vif, sta);
+ key->hw_key_idx = 0;
ret = ath_key_config(common, vif, sta, key);
if (ret >= 0) {
key->hw_key_idx = ret;
@@ -1570,9 +1627,27 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
+ if (an && key->hw_key_idx) {
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (an->key_idx[i])
+ continue;
+ an->key_idx[i] = key->hw_key_idx;
+ break;
+ }
+ WARN_ON(i == ARRAY_SIZE(an->key_idx));
+ }
break;
case DISABLE_KEY:
ath_key_delete(common, key);
+ if (an) {
+ for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
+ if (an->key_idx[i] != key->hw_key_idx)
+ continue;
+ an->key_idx[i] = 0;
+ break;
+ }
+ }
+ key->hw_key_idx = 0;
break;
default:
ret = -EINVAL;
@@ -1636,6 +1711,66 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
ath9k_set_assoc_state(sc, vif);
}
+void ath9k_p2p_ps_timer(void *priv)
+{
+ struct ath_softc *sc = priv;
+ struct ath_vif *avp = sc->p2p_ps_vif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct ath_node *an;
+ u32 tsf;
+
+ if (!avp)
+ return;
+
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ if (!avp->noa.absent)
+ tsf += ATH_P2P_PS_STOP_TIME;
+
+ if (!avp->noa.has_next_tsf ||
+ avp->noa.next_tsf - tsf > BIT(31))
+ ieee80211_update_p2p_noa(&avp->noa, tsf);
+
+ ath9k_update_p2p_ps_timer(sc, avp);
+
+ rcu_read_lock();
+
+ vif = avp->vif;
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta)
+ goto out;
+
+ an = (void *) sta->drv_priv;
+ if (an->sleeping == !!avp->noa.absent)
+ goto out;
+
+ an->sleeping = avp->noa.absent;
+ if (an->sleeping)
+ ath_tx_aggr_sleep(sta, sc, an);
+ else
+ ath_tx_aggr_wakeup(sc, an);
+
+out:
+ rcu_read_unlock();
+}
+
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ u32 tsf;
+
+ if (!sc->p2p_ps_timer)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+ return;
+
+ sc->p2p_ps_vif = avp;
+ tsf = ath9k_hw_gettsf32(sc->sc_ah);
+ ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
+ ath9k_update_p2p_ps_timer(sc, avp);
+}
+
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1650,6 +1785,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ unsigned long flags;
int slottime;
ath9k_ps_wakeup(sc);
@@ -1710,6 +1846,15 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_P2P_PS) {
+ spin_lock_bh(&sc->sc_pcu_lock);
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (!(sc->ps_flags & PS_BEACON_SYNC))
+ ath9k_update_p2p_ps(sc, vif);
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ }
+
if (changed & CHECK_ANI)
ath_check_ani(sc);
@@ -1883,7 +2028,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
return !!npend;
}
-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
@@ -2084,14 +2230,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
-static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_chan_def *chandef)
-{
- /* depend on vif->csa_active only */
- return;
-}
-
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2139,5 +2277,4 @@ struct ieee80211_ops ath9k_ops = {
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
- .channel_switch_beacon = ath9k_channel_switch_beacon,
};
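
One detail worth noting in the new P2P powersave code: TSF values here are 32-bit, and both ath9k_update_p2p_ps_timer() and ath9k_p2p_ps_timer() rely on differences taken modulo 2^32 (the s32 subtraction and the BIT(31) test), which stay correct across TSF wraparound as long as the true distance is below 2^31 microseconds. A small demonstration, independent of the driver:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t tsf = 0xffffff00u;      /* TSF just before 32-bit wrap */
            uint32_t next_tsf = 0x00000100u; /* NoA boundary just after wrap */
            int32_t delta = (int32_t)(next_tsf - tsf);

            /* prints 512: the signed view of the difference is still right */
            printf("delta = %d us\n", delta);
            return 0;
    }
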
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 914dbc6b1720..4dec09e565ed 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -686,7 +686,7 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct ath_softc *sc = (struct ath_softc *) common->priv;
struct ath9k_platform_data *pdata = sc->dev->platform_data;
- if (pdata) {
+ if (pdata && !pdata->use_eeprom) {
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: eeprom read failed, offset %08x is out of range\n",
@@ -914,6 +914,7 @@ static int ath_pci_suspend(struct device *device)
*/
ath9k_stop_btcoex(sc);
ath9k_hw_disable(sc->sc_ah);
+ del_timer_sync(&sc->sleep_timer);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 19df969ec909..9105a92364f7 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,7 +34,8 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
+ bool flush)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -59,18 +60,19 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
common->rx_bufsize,
0);
- if (sc->rx.rxlink == NULL)
- ath9k_hw_putrxbuf(ah, bf->bf_daddr);
- else
+ if (sc->rx.rxlink)
*sc->rx.rxlink = bf->bf_daddr;
+ else if (!flush)
+ ath9k_hw_putrxbuf(ah, bf->bf_daddr);
sc->rx.rxlink = &ds->ds_link;
}
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
+ bool flush)
{
if (sc->rx.buf_hold)
- ath_rx_buf_link(sc, sc->rx.buf_hold);
+ ath_rx_buf_link(sc, sc->rx.buf_hold, flush);
sc->rx.buf_hold = bf;
}
@@ -442,7 +444,7 @@ int ath_startrecv(struct ath_softc *sc)
sc->rx.buf_hold = NULL;
sc->rx.rxlink = NULL;
list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
- ath_rx_buf_link(sc, bf);
+ ath_rx_buf_link(sc, bf, false);
}
/* We could have deleted elements so the list may be empty now */
@@ -538,7 +540,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
"Reconfigure beacon timers based on synchronized timestamp\n");
- ath9k_set_beacon(sc);
+ if (!(WARN_ON_ONCE(sc->cur_beacon_conf.beacon_interval == 0)))
+ ath9k_set_beacon(sc);
+ if (sc->p2p_ps_vif)
+ ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
}
if (ath_beacon_dtim_pending_cab(skb)) {
@@ -1115,12 +1120,12 @@ requeue_drop_frag:
requeue:
list_add_tail(&bf->list, &sc->rx.rxbuf);
- if (edma) {
- ath_rx_edma_buf_link(sc, qtype);
- } else {
- ath_rx_buf_relink(sc, bf);
+ if (!edma) {
+ ath_rx_buf_relink(sc, bf, flush);
if (!flush)
ath9k_hw_rxena(ah);
+ } else if (!flush) {
+ ath_rx_edma_buf_link(sc, qtype);
}
if (!budget--)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index b1fd3fa84983..f1bbce3f7774 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -505,9 +505,6 @@
#define AR_D_QCUMASK 0x000003FF
#define AR_D_QCUMASK_RESV0 0xFFFFFC00
-#define AR_D_TXBLK_CMD 0x1038
-#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i))
-
#define AR_D0_LCL_IFS 0x1040
#define AR_D1_LCL_IFS 0x1044
#define AR_D2_LCL_IFS 0x1048
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4c8cdb097b65..f8ded84b7be8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1707,7 +1707,9 @@ found:
return 0;
}
-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct ar9170 *ar = hw->priv;
unsigned int vid;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index ca115f33746f..f35c7f30f9a6 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1076,8 +1076,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
carl9170_set_state(ar, CARL9170_STOPPED);
- return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+ err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+ if (err) {
+ usb_put_dev(udev);
+ usb_put_dev(udev);
+ carl9170_free(ar);
+ }
+ return err;
}
static void carl9170_usb_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index a1a69c5db409..650be79c7ac9 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -73,9 +73,52 @@ static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15,
};
-/* for now, we support ETSI radar types, FCC and JP are TODO */
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ PMIN - PRI_TOLERANCE, \
+ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, \
+}
+
+static const struct radar_detector_specs fcc_radar_ref_types[] = {
+ FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+ FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
+ FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
+ FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
+ FCC_PATTERN(4, 50, 100, 1000, 2000, 20, 1),
+ FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types fcc_radar_types = {
+ .region = NL80211_DFS_FCC,
+ .num_radar_types = ARRAY_SIZE(fcc_radar_ref_types),
+ .radar_types = fcc_radar_ref_types,
+};
+
+#define JP_PATTERN FCC_PATTERN
+static const struct radar_detector_specs jp_radar_ref_types[] = {
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
+ JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 20, 1),
+ JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+};
+
+static const struct radar_types jp_radar_types = {
+ .region = NL80211_DFS_JP,
+ .num_radar_types = ARRAY_SIZE(jp_radar_ref_types),
+ .radar_types = jp_radar_ref_types,
+};
+
static const struct radar_types *dfs_domains[] = {
&etsi_radar_types_v15,
+ &fcc_radar_types,
+ &jp_radar_types,
};
/**
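
Each FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) entry above widens the pulse-repetition-interval window by PRI_TOLERANCE on each side and scales the upper bound by the PRF. Assuming the PRI_TOLERANCE of 16 us this file already uses for the ETSI table, pattern 4 (the long-pulse radar) works out as follows:

    #include <stdio.h>

    #define PRI_TOLERANCE 16 /* assumed, as for the ETSI table */

    int main(void)
    {
            int pmin = 1000, pmax = 2000, prf = 20; /* FCC_PATTERN(4, ...) */

            printf("pri_min = %d us\n", pmin - PRI_TOLERANCE);       /* 984 */
            printf("pri_max = %d us\n", pmax * prf + PRI_TOLERANCE); /* 40016 */
            return 0;
    }
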
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 7bf0ef8a1f56..63986931829e 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2068,7 +2068,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
if (!msg_ind)
goto nomem;
msg_ind->msg_len = len;
- msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);
if (!msg_ind->msg) {
kfree(msg_ind);
nomem:
@@ -2080,7 +2080,6 @@ nomem:
msg_header->msg_type);
break;
}
- memcpy(msg_ind->msg, buf, len);
mutex_lock(&wcn->hal_ind_mutex);
list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 4806a49cb61b..820d4ebd9322 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -172,7 +172,7 @@ static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
static int wil_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
@@ -288,6 +288,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
wil->scan_request = request;
+ mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
memset(&cmd, 0, sizeof(cmd));
cmd.cmd.num_channels = 0;
@@ -671,7 +672,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
}
static int wil_cfg80211_del_station(struct wiphy *wiphy,
- struct net_device *dev, u8 *mac)
+ struct net_device *dev, const u8 *mac)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ecdabe4adec3..8d4bc4bfb664 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -35,7 +35,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
void __iomem *x = wmi_addr(wil, vring->hwtail);
seq_printf(s, "VRING %s = {\n", name);
- seq_printf(s, " pa = 0x%016llx\n", (unsigned long long)vring->pa);
+ seq_printf(s, " pa = %pad\n", &vring->pa);
seq_printf(s, " va = 0x%p\n", vring->va);
seq_printf(s, " size = %d\n", vring->size);
seq_printf(s, " swtail = %d\n", vring->swtail);
@@ -473,7 +473,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
u[0], u[1], u[2], u[3]);
seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
u[4], u[5], u[6], u[7]);
- seq_printf(s, " SKB = %p\n", skb);
+ seq_printf(s, " SKB = 0x%p\n", skb);
if (skb) {
skb_get(skb);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 5824cd41e4ba..73593aa3cd98 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
}
if (isr)
- wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+ wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
wil->isr_misc = 0;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 95f4efe9ef37..11e6d9d22eae 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -81,7 +81,7 @@ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
memset(&sta->stats, 0, sizeof(sta->stats));
}
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
{
int cid = -ENOENT;
struct net_device *ndev = wil_to_ndev(wil);
@@ -150,6 +150,15 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
+static void wil_scan_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ clear_bit(wil_status_fwready, &wil->status);
+ wil_err(wil, "Scan timeout detected, start fw error recovery\n");
+ schedule_work(&wil->fw_error_worker);
+}
+
static void wil_fw_error_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work,
@@ -161,12 +170,30 @@ static void wil_fw_error_worker(struct work_struct *work)
if (no_fw_recovery)
return;
+ /* increment @recovery_count if less than WIL6210_FW_RECOVERY_TO
+ * passed since last recovery attempt
+ */
+ if (time_is_after_jiffies(wil->last_fw_recovery +
+ WIL6210_FW_RECOVERY_TO))
+ wil->recovery_count++;
+ else
+ wil->recovery_count = 1; /* fw was alive for a long time */
+
+ if (wil->recovery_count > WIL6210_FW_RECOVERY_RETRIES) {
+ wil_err(wil, "too many recovery attempts (%d), giving up\n",
+ wil->recovery_count);
+ return;
+ }
+
+ wil->last_fw_recovery = jiffies;
+
mutex_lock(&wil->mutex);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_MONITOR:
- wil_info(wil, "fw error recovery started...\n");
+ wil_info(wil, "fw error recovery started (try %d)...\n",
+ wil->recovery_count);
wil_reset(wil);
/* need to re-allocate Rx ring after reset */
@@ -230,6 +257,7 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->pending_connect_cid = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+ setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
@@ -249,10 +277,12 @@ int wil_priv_init(struct wil6210_priv *wil)
return -EAGAIN;
}
+ wil->last_fw_recovery = jiffies;
+
return 0;
}
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
{
del_timer_sync(&wil->connect_timer);
_wil6210_disconnect(wil, bssid);
@@ -260,6 +290,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
void wil_priv_deinit(struct wil6210_priv *wil)
{
+ del_timer_sync(&wil->scan_timer);
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
mutex_lock(&wil->mutex);
@@ -363,8 +394,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
wil_err(wil, "Firmware not ready\n");
return -ETIME;
} else {
- wil_dbg_misc(wil, "FW ready after %d ms\n",
- jiffies_to_msecs(to-left));
+ wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+ jiffies_to_msecs(to-left), wil->hw_version);
}
return 0;
}
@@ -391,6 +422,7 @@ int wil_reset(struct wil6210_priv *wil)
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
+ del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
@@ -520,6 +552,7 @@ static int __wil_down(struct wil6210_priv *wil)
napi_disable(&wil->napi_tx);
if (wil->scan_request) {
+ del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, true);
wil->scan_request = NULL;
}
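
The throttle added to wil_fw_error_worker() counts recovery attempts that fall within WIL6210_FW_RECOVERY_TO of the previous one and gives up after WIL6210_FW_RECOVERY_RETRIES in a row. A user-space model of that logic, with milliseconds standing in for jiffies:

    #include <stdio.h>

    #define RECOVERY_TO_MS   5000 /* mirrors WIL6210_FW_RECOVERY_TO */
    #define RECOVERY_RETRIES 5    /* mirrors WIL6210_FW_RECOVERY_RETRIES */

    static unsigned long last_ms;
    static int count;

    /* returns 1 to attempt recovery, 0 to give up */
    static int try_recover(unsigned long now_ms)
    {
            if (now_ms < last_ms + RECOVERY_TO_MS)
                    count++;   /* failed again shortly after the last try */
            else
                    count = 1; /* fw was alive for a while; reset the count */
            last_ms = now_ms;
            return count <= RECOVERY_RETRIES;
    }

    int main(void)
    {
            unsigned long t[] = { 0, 1000, 2000, 3000, 4000, 4500, 60000 };
            unsigned int i;

            for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
                    printf("t=%5lums -> %s\n", t[i],
                           try_recover(t[i]) ? "recover" : "give up");
            return 0;
    }
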
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index fdcaeb820e75..106b6dcb773a 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -32,12 +32,26 @@ static int wil_stop(struct net_device *ndev)
return wil_down(wil);
}
+static int wil_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+ if (new_mtu < 68 || new_mtu > IEEE80211_MAX_DATA_LEN_DMG)
+ return -EINVAL;
+
+ wil_dbg_misc(wil, "change MTU %d -> %d\n", ndev->mtu, new_mtu);
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
static const struct net_device_ops wil_netdev_ops = {
.ndo_open = wil_open,
.ndo_stop = wil_stop,
.ndo_start_xmit = wil_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = wil_change_mtu,
};
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index f1e1bb338d68..1e2e07b9d13d 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto release_irq;
- wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
-
return 0;
release_irq:
@@ -140,7 +138,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_reg;
}
/* rollback to err_iounmap */
- dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+ dev_info(&pdev->dev, "CSR at %pR -> 0x%p\n", &pdev->resource[0], csr);
wil = wil_if_alloc(dev, csr);
if (IS_ERR(wil)) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index d04629fe053f..747ae1275877 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -49,10 +49,17 @@ static void wil_release_reorder_frames(struct wil6210_priv *wil,
{
int index;
- while (seq_less(r->head_seq_num, hseq)) {
+ /* note: this function is never called with
+ * hseq preceding r->head_seq_num, i.e. it always holds that
+ * !seq_less(hseq, r->head_seq_num),
+ * and thus on loop exit r->head_seq_num == hseq
+ */
+ while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
index = reorder_index(r, r->head_seq_num);
wil_release_reorder_frame(wil, r, index);
}
+ r->head_seq_num = hseq;
}
static void wil_reorder_release(struct wil6210_priv *wil,
@@ -91,6 +98,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
spin_lock(&r->reorder_lock);
+ /* Due to the race between the WMI event reporting BACK
+ * establishment and data Rx, a few packets may be passed up
+ * before the reorder buffer gets allocated. Catch up by
+ * pretending the SSN is what we see in the first Rx packet.
+ */
+ if (r->first_time) {
+ r->first_time = false;
+ if (seq != r->head_seq_num) {
+ wil_err(wil, "Error: 1-st frame with wrong sequence"
+ " %d, should be %d. Fixing...\n", seq,
+ r->head_seq_num);
+ r->head_seq_num = seq;
+ r->ssn = seq;
+ }
+ }
+
/* frame with out of date sequence number */
if (seq_less(seq, r->head_seq_num)) {
dev_kfree_skb(skb);
@@ -162,6 +185,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
r->head_seq_num = ssn;
r->buf_size = size;
r->stored_mpdu_num = 0;
+ r->first_time = true;
return r;
}
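
The seq_less() calls throughout this file compare 802.11 sequence numbers modulo 4096; presumably the usual mac80211 idiom, sketched below (the real definition lives elsewhere in rx_reorder.c):

    #include <stdbool.h>
    #include <stdio.h>

    #define SEQ_MODULO 0x1000
    #define SEQ_MASK   0xfff

    /* true if sq1 precedes sq2 in modulo-4096 sequence space */
    static bool seq_less(unsigned short sq1, unsigned short sq2)
    {
            return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
    }

    int main(void)
    {
            printf("%d\n", seq_less(4090, 5)); /* 1: 4090 precedes 5 across wrap */
            printf("%d\n", seq_less(5, 4090)); /* 0 */
            return 0;
    }
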
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index c8c547457eb4..0784ef3d4ce2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -64,6 +64,22 @@ static inline int wil_vring_avail_tx(struct vring *vring)
return vring->size - used - 1;
}
+/**
+ * wil_vring_wmark_low - low watermark for available descriptor space
+ */
+static inline int wil_vring_wmark_low(struct vring *vring)
+{
+ return vring->size/8;
+}
+
+/**
+ * wil_vring_wmark_high - high watermark for available descriptor space
+ */
+static inline int wil_vring_wmark_high(struct vring *vring)
+{
+ return vring->size/4;
+}
+
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
struct device *dev = wil_to_dev(wil);
@@ -98,8 +114,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
_d->dma.status = TX_DMA_STATUS_DU;
}
- wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
- vring->va, (unsigned long long)vring->pa, vring->ctx);
+ wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
+ vring->va, &vring->pa, vring->ctx);
return 0;
}
@@ -880,8 +896,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
pa = dma_map_single(dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
- skb->data, (unsigned long long)pa);
+ wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
+ skb->data, &pa);
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb_headlen(skb), false);
@@ -1007,7 +1023,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
rc = wil_tx_vring(wil, vring, skb);
/* do we still have enough room in the vring? */
- if (wil_vring_avail_tx(vring) < vring->size/8)
+ if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))
netif_tx_stop_all_queues(wil_to_ndev(wil));
switch (rc) {
@@ -1116,7 +1132,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
done++;
}
}
- if (wil_vring_avail_tx(vring) > vring->size/4)
+ if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring))
netif_tx_wake_all_queues(wil_to_ndev(wil));
return done;
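
The two watermark helpers make the existing stop/wake thresholds explicit: transmit queues stop when fewer than size/8 descriptors remain and wake only once more than size/4 are free again, so a single completion cannot toggle the queue state back and forth. With the TX ring grown to 512 entries below, that means stop below 64 and wake above 128:

    #include <stdio.h>

    struct vring { int size; };

    static int wmark_low(const struct vring *v)  { return v->size / 8; }
    static int wmark_high(const struct vring *v) { return v->size / 4; }

    int main(void)
    {
            struct vring v = { .size = 512 };

            /* prints: stop TX below 64 free, wake above 128 free */
            printf("stop TX below %d free, wake above %d free\n",
                   wmark_low(&v), wmark_high(&v));
            return 0;
    }
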
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 2a2dec75f026..e25edc52398f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -35,11 +35,14 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL6210_MEM_SIZE (2*1024*1024UL)
#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (512)
#define WIL6210_MAX_TX_RINGS (24) /* HW limit */
#define WIL6210_MAX_CID (8) /* HW limit */
#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
#define WIL6210_ITR_TRSH (10000) /* arbitrary - about 15 IRQs/msec */
+#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
+#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
+#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
/* Hardware definitions begin */
@@ -301,6 +304,7 @@ struct wil_tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
+ bool first_time; /* is this the first time the buffer is used? */
};
struct wil6210_stats {
@@ -360,6 +364,8 @@ struct wil6210_priv {
u32 fw_version;
u32 hw_version;
u8 n_mids; /* number of additional MIDs as reported by FW */
+ int recovery_count; /* num of FW recovery attempts in a short time */
+ unsigned long last_fw_recovery; /* jiffies of last fw recovery */
/* profile */
u32 monitor_flags;
u32 secure_pcp; /* create secure PCP? */
@@ -381,6 +387,7 @@ struct wil6210_priv {
struct work_struct disconnect_worker;
struct work_struct fw_error_worker; /* for FW error recovery */
struct timer_list connect_timer;
+ struct timer_list scan_timer; /* detect scan timeout */
int pending_connect_cid;
struct list_head pending_wmi_ev;
/*
@@ -507,7 +514,7 @@ void wil_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
int wmi_pcp_stop(struct wil6210_priv *wil);
-void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid);
int wil_rx_init(struct wil6210_priv *wil);
void wil_rx_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2ba56eef0c45..6cc0e182cc70 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
might_sleep();
if (!test_bit(wil_status_fwready, &wil->status)) {
- wil_err(wil, "FW not ready\n");
+ wil_err(wil, "WMI: cannot send command while FW not ready\n");
return -EAGAIN;
}
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
- wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
- evt->mac, wil->n_mids);
+ wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ evt->mac, wil->n_mids);
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
int len)
{
- wil_dbg_wmi(wil, "WMI: FW ready\n");
+ wil_dbg_wmi(wil, "WMI: got FW ready event\n");
set_bit(wil_status_fwready, &wil->status);
/* reuse wmi_ready for the firmware ready indication */
@@ -348,9 +348,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
{
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
- bool aborted = (data->status != 0);
+ bool aborted = (data->status != WMI_SCAN_SUCCESS);
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+ del_timer_sync(&wil->scan_timer);
cfg80211_scan_done(wil->scan_request, aborted);
wil->scan_request = NULL;
} else {
@@ -658,21 +659,27 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
u8 *cmd;
void __iomem *src;
ulong flags;
+ unsigned n;
if (!test_bit(wil_status_reset_done, &wil->status)) {
wil_err(wil, "Reset not completed\n");
return;
}
- for (;;) {
+ for (n = 0;; n++) {
u16 len;
r->head = ioread32(wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
- if (r->tail == r->head)
+ if (r->tail == r->head) {
+ if (n == 0)
+ wil_dbg_wmi(wil, "No events?\n");
return;
+ }
- /* read cmd from tail */
+ wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
+ r->head, r->tail);
+ /* read cmd descriptor from tail */
wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
sizeof(struct wil6210_mbox_ring_desc));
if (d_tail.sync == 0) {
@@ -680,13 +687,18 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
return;
}
+ /* read cmd header from descriptor */
if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
wil_err(wil, "Mbox evt at 0x%08x?\n",
le32_to_cpu(d_tail.addr));
return;
}
-
len = le16_to_cpu(hdr.len);
+ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
+ le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+ hdr.flags);
+
+ /* read cmd buffer from descriptor */
src = wmi_buffer(wil, d_tail.addr) +
sizeof(struct wil6210_mbox_hdr);
evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
@@ -702,9 +714,6 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
iowrite32(0, wil->csr + HOSTADDR(r->tail) +
offsetof(struct wil6210_mbox_ring_desc, sync));
/* indicate */
- wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
- le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
- hdr.flags);
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
@@ -734,6 +743,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
wil_dbg_wmi(wil, "queue_work -> %d\n", q);
}
}
+ if (n > 1)
+ wil_dbg_wmi(wil, "%s -> %d events processed\n", __func__, n);
}
int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
@@ -802,6 +813,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
+ .pcp_max_assoc_sta = WIL6210_MAX_CID,
};
struct {
struct wil6210_mbox_hdr_wmi wmi;
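
The reworked wmi_recv_cmd() loop drains the mailbox ring until the tail index catches up with the head, counting events so the multi-event case can be logged. Stripped of the device I/O, it is the standard single-consumer ring drain:

    #include <stdio.h>

    struct ring { unsigned int head, tail, size; };

    /* minimal model of the head/tail drain loop; indices here are array
     * slots rather than the device's bus addresses */
    static unsigned int drain(struct ring *r)
    {
            unsigned int n = 0;

            while (r->tail != r->head) {
                    /* ...read and dispatch the descriptor at r->tail... */
                    r->tail = (r->tail + 1) % r->size;
                    n++;
            }
            return n;
    }

    int main(void)
    {
            struct ring r = { .head = 3, .tail = 0, .size = 16 };

            printf("processed %u events\n", drain(&r)); /* 3 */
            return 0;
    }
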
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 50b8528394f4..17334c852866 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -28,7 +28,7 @@
#define __WILOCITY_WMI_H__
/* General */
-
+#define WILOCITY_MAX_ASSOC_STA (8)
#define WMI_MAC_LEN (6)
#define WMI_PROX_RANGE_NUM (3)
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
__le16 disconnect_reason;
} __packed;
-/*
- * WMI_RECONNECT_CMDID
- */
-struct wmi_reconnect_cmd {
- u8 channel; /* hint */
- u8 reserved;
- u8 bssid[WMI_MAC_LEN]; /* mandatory if set */
-} __packed;
-
/*
* WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
WMI_LONG_SCAN = 0,
WMI_SHORT_SCAN = 1,
WMI_PBC_SCAN = 2,
+ WMI_ACTIVE_SCAN = 3,
+ WMI_DIRECT_SCAN = 4,
};
struct wmi_start_scan_cmd {
- u8 reserved[8];
-
+ u8 direct_scan_mac_addr[6];
+ u8 reserved[2];
__le32 home_dwell_time; /* Max duration in the home channel(ms) */
__le32 force_scan_interval; /* Time interval between scans (ms)*/
u8 scan_type; /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
+
/*
* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
__le16 frag_num;
__le64 ss_mask;
u8 network_type;
- u8 reserved;
+ u8 pcp_max_assoc_sta;
u8 disable_sec_offload;
u8 disable_sec;
} __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
struct wmi_port_allocate_cmd {
u8 mac[WMI_MAC_LEN];
u8 port_role;
- u8 midid;
+ u8 mid;
} __packed;
/*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
WMI_DISCOVERY_MODE_OFFLOAD = 1,
+ WMI_DISCOVERY_MODE_PEER2PEER = 2,
};
struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
*/
struct wmi_pcp_start_cmd {
__le16 bcon_interval;
- u8 reserved0[10];
+ u8 pcp_max_assoc_sta;
+ u8 reserved0[9];
u8 network_type;
u8 channel;
u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
struct wmi_disconnect_event {
__le16 protocol_reason_status; /* reason code, see 802.11 spec. */
u8 bssid[WMI_MAC_LEN]; /* set if known */
- u8 disconnect_reason; /* see wmi_disconnect_reason_e */
- u8 assoc_resp_len;
- u8 assoc_info[0];
+ u8 disconnect_reason; /* see wmi_disconnect_reason */
+ u8 assoc_resp_len; /* not in use */
+ u8 assoc_info[0]; /* not in use */
} __packed;
/*
* WMI_SCAN_COMPLETE_EVENTID
*/
+enum scan_status {
+ WMI_SCAN_SUCCESS = 0,
+ WMI_SCAN_FAILED = 1,
+ WMI_SCAN_ABORTED = 2,
+ WMI_SCAN_REJECTED = 3,
+};
+
struct wmi_scan_complete_event {
- __le32 status;
+ __le32 status; /* scan_status */
} __packed;
/*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
u8 channel; /* From Radio MNGR */
} __packed;
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+ u8 payload[0];
+} __packed;
+
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];
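
Like wmi_rx_mgmt_packet_event, the new wmi_tx_mgmt_packet_event ends in payload[0], the pre-C99 zero-length-array idiom for a variable-sized trailer: the header and payload are carved out of one allocation sized at runtime. Illustrated outside the driver:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct evt {
            unsigned short len;
            unsigned char payload[0]; /* C99 would spell this payload[] */
    };

    int main(void)
    {
            const char data[] = "frame bytes";
            struct evt *e = malloc(sizeof(*e) + sizeof(data));

            if (!e)
                    return 1;
            e->len = sizeof(data);
            memcpy(e->payload, data, sizeof(data));
            printf("event with %u payload bytes\n", e->len);
            free(e);
            return 0;
    }
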
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 088d544ec63f..e3f67b8d3f80 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,7 +1,8 @@
config B43
tristate "Broadcom 43xx wireless support (mac80211 stack)"
- depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
- select SSB
+ depends on (BCMA_POSSIBLE || SSB_POSSIBLE) && MAC80211 && HAS_DMA
+ select BCMA if B43_BCMA
+ select SSB if B43_SSB
select FW_LOADER
---help---
b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -27,14 +28,33 @@ config B43
If unsure, say M.
config B43_BCMA
- bool "Support for BCMA bus"
- depends on B43 && (BCMA = y || BCMA = B43)
- default y
+ bool
config B43_SSB
bool
- depends on B43 && (SSB = y || SSB = B43)
- default y
+
+choice
+ prompt "Supported bus types"
+ depends on B43
+ default B43_BUSES_BCMA_AND_SSB
+
+config B43_BUSES_BCMA_AND_SSB
+ bool "BCMA and SSB"
+ depends on BCMA_POSSIBLE && SSB_POSSIBLE
+ select B43_BCMA
+ select B43_SSB
+
+config B43_BUSES_BCMA
+ bool "BCMA only"
+ depends on BCMA_POSSIBLE
+ select B43_BCMA
+
+config B43_BUSES_SSB
+ bool "SSB only"
+ depends on SSB_POSSIBLE
+ select B43_SSB
+
+endchoice
# Auto-select SSB PCI-HOST support, if possible
config B43_PCI_AUTOSELECT
@@ -53,7 +73,7 @@ config B43_PCICORE_AUTOSELECT
config B43_PCMCIA
bool "Broadcom 43xx PCMCIA device support"
- depends on B43 && SSB_PCMCIAHOST_POSSIBLE
+ depends on B43 && B43_SSB && SSB_PCMCIAHOST_POSSIBLE
select SSB_PCMCIAHOST
---help---
Broadcom 43xx PCMCIA device support.
@@ -73,7 +93,7 @@ config B43_PCMCIA
config B43_SDIO
bool "Broadcom 43xx SDIO device support"
- depends on B43 && SSB_SDIOHOST_POSSIBLE
+ depends on B43 && B43_SSB && SSB_SDIOHOST_POSSIBLE
select SSB_SDIOHOST
---help---
Broadcom 43xx device support for Soft-MAC SDIO devices.
@@ -98,7 +118,7 @@ config B43_BCMA_PIO
config B43_PIO
bool
- depends on B43
+ depends on B43 && B43_SSB
select SSB_BLOCKIO
default y
@@ -116,7 +136,7 @@ config B43_PHY_N
config B43_PHY_LP
bool "Support for low-power (LP-PHY) devices"
- depends on B43
+ depends on B43 && B43_SSB
default y
---help---
Support for the LP-PHY.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 54376fddfaf9..4113b6934764 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -915,10 +915,6 @@ struct b43_wl {
char rng_name[30 + 1];
#endif /* CONFIG_B43_HWRNG */
- /* List of all wireless devices on this chip */
- struct list_head devlist;
- u8 nr_devs;
-
bool radiotap_enabled;
bool radio_enabled;
diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h
index 184c95659279..f3205c6988bc 100644
--- a/drivers/net/wireless/b43/bus.h
+++ b/drivers/net/wireless/b43/bus.h
@@ -5,7 +5,9 @@ enum b43_bus_type {
#ifdef CONFIG_B43_BCMA
B43_BUS_BCMA,
#endif
+#ifdef CONFIG_B43_SSB
B43_BUS_SSB,
+#endif
};
struct b43_bus_dev {
@@ -52,13 +54,21 @@ struct b43_bus_dev {
static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
{
+#ifdef CONFIG_B43_SSB
return (dev->bus_type == B43_BUS_SSB &&
dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
+#else
+ return false;
+#endif
}
static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
{
+#ifdef CONFIG_B43_SSB
return (dev->bus_type == B43_BUS_SSB &&
dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
+#else
+ return false;
+#endif
}
struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core);
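
The new #ifdef CONFIG_B43_SSB guards in bus.h follow a common kernel pattern: compile the bus-specific test only when that bus type can exist, and otherwise let the inline helper collapse to a constant so callers need no conditionals of their own and the compiler can drop the dead branches. Schematically, with generic names:

    #include <stdbool.h>

    enum bus_type { BUS_TYPE_FOO, BUS_TYPE_BAR };
    struct bus_dev { enum bus_type bus_type; };

    #ifdef CONFIG_FOO_BUS
    static inline bool dev_is_foo(const struct bus_dev *dev)
    {
            return dev->bus_type == BUS_TYPE_FOO;
    }
    #else
    static inline bool dev_is_foo(const struct bus_dev *dev)
    {
            (void)dev;
            return false; /* bus support compiled out: always false */
    }
    #endif
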
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 69fc3d65531a..32538ac5f7e4 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -182,7 +182,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
#define b43_g_ratetable (__b43_ratetable + 0)
#define b43_g_ratetable_size 12
-#define CHAN4G(_channel, _freq, _flags) { \
+#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
@@ -191,23 +191,31 @@ static struct ieee80211_rate __b43_ratetable[] = {
.max_power = 30, \
}
static struct ieee80211_channel b43_2ghz_chantable[] = {
- CHAN4G(1, 2412, 0),
- CHAN4G(2, 2417, 0),
- CHAN4G(3, 2422, 0),
- CHAN4G(4, 2427, 0),
- CHAN4G(5, 2432, 0),
- CHAN4G(6, 2437, 0),
- CHAN4G(7, 2442, 0),
- CHAN4G(8, 2447, 0),
- CHAN4G(9, 2452, 0),
- CHAN4G(10, 2457, 0),
- CHAN4G(11, 2462, 0),
- CHAN4G(12, 2467, 0),
- CHAN4G(13, 2472, 0),
- CHAN4G(14, 2484, 0),
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
};
-#undef CHAN4G
+#undef CHAN2G
+#define CHAN4G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 4000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
#define CHAN5G(_channel, _flags) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
@@ -217,6 +225,18 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
.max_power = 30, \
}
static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
+ CHAN4G(184, 0), CHAN4G(186, 0),
+ CHAN4G(188, 0), CHAN4G(190, 0),
+ CHAN4G(192, 0), CHAN4G(194, 0),
+ CHAN4G(196, 0), CHAN4G(198, 0),
+ CHAN4G(200, 0), CHAN4G(202, 0),
+ CHAN4G(204, 0), CHAN4G(206, 0),
+ CHAN4G(208, 0), CHAN4G(210, 0),
+ CHAN4G(212, 0), CHAN4G(214, 0),
+ CHAN4G(216, 0), CHAN4G(218, 0),
+ CHAN4G(220, 0), CHAN4G(222, 0),
+ CHAN4G(224, 0), CHAN4G(226, 0),
+ CHAN4G(228, 0),
CHAN5G(32, 0), CHAN5G(34, 0),
CHAN5G(36, 0), CHAN5G(38, 0),
CHAN5G(40, 0), CHAN5G(42, 0),
@@ -260,18 +280,7 @@ static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
CHAN5G(170, 0), CHAN5G(172, 0),
CHAN5G(174, 0), CHAN5G(176, 0),
CHAN5G(178, 0), CHAN5G(180, 0),
- CHAN5G(182, 0), CHAN5G(184, 0),
- CHAN5G(186, 0), CHAN5G(188, 0),
- CHAN5G(190, 0), CHAN5G(192, 0),
- CHAN5G(194, 0), CHAN5G(196, 0),
- CHAN5G(198, 0), CHAN5G(200, 0),
- CHAN5G(202, 0), CHAN5G(204, 0),
- CHAN5G(206, 0), CHAN5G(208, 0),
- CHAN5G(210, 0), CHAN5G(212, 0),
- CHAN5G(214, 0), CHAN5G(216, 0),
- CHAN5G(218, 0), CHAN5G(220, 0),
- CHAN5G(222, 0), CHAN5G(224, 0),
- CHAN5G(226, 0), CHAN5G(228, 0),
+ CHAN5G(182, 0),
};
static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
@@ -295,6 +304,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
CHAN5G(208, 0), CHAN5G(212, 0),
CHAN5G(216, 0),
};
+#undef CHAN4G
#undef CHAN5G
static struct ieee80211_supported_band b43_band_5GHz_nphy = {
@@ -1175,18 +1185,7 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
udelay(2);
- /* Take PHY out of reset */
- flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
- flags &= ~B43_BCMA_IOCTL_PHY_RESET;
- flags |= BCMA_IOCTL_FGC;
- bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
- udelay(1);
-
- /* Do not force clock anymore */
- flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
- flags &= ~BCMA_IOCTL_FGC;
- bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
- udelay(1);
+ b43_phy_take_out_of_reset(dev);
}
static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
@@ -1195,18 +1194,22 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
B43_BCMA_CLKCTLST_PHY_PLL_REQ;
u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
B43_BCMA_CLKCTLST_PHY_PLL_ST;
+ u32 flags;
+
+ flags = B43_BCMA_IOCTL_PHY_CLKEN;
+ if (gmode)
+ flags |= B43_BCMA_IOCTL_GMODE;
+ b43_device_enable(dev, flags);
- b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
b43_bcma_phy_reset(dev);
bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
}
#endif
+#ifdef CONFIG_B43_SSB
static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
{
- struct ssb_device *sdev = dev->dev->sdev;
- u32 tmslow;
u32 flags = 0;
if (gmode)
@@ -1218,18 +1221,9 @@ static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
b43_device_enable(dev, flags);
msleep(2); /* Wait for the PLL to turn on. */
- /* Now take the PHY out of Reset again */
- tmslow = ssb_read32(sdev, SSB_TMSLOW);
- tmslow |= SSB_TMSLOW_FGC;
- tmslow &= ~B43_TMSLOW_PHYRESET;
- ssb_write32(sdev, SSB_TMSLOW, tmslow);
- ssb_read32(sdev, SSB_TMSLOW); /* flush */
- msleep(1);
- tmslow &= ~SSB_TMSLOW_FGC;
- ssb_write32(sdev, SSB_TMSLOW, tmslow);
- ssb_read32(sdev, SSB_TMSLOW); /* flush */
- msleep(1);
+ b43_phy_take_out_of_reset(dev);
}
+#endif
void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
{
@@ -2704,32 +2698,37 @@ static int b43_upload_initvals(struct b43_wldev *dev)
struct b43_firmware *fw = &dev->fw;
const struct b43_iv *ivals;
size_t count;
- int err;
hdr = (const struct b43_fw_header *)(fw->initvals.data->data);
ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len);
count = be32_to_cpu(hdr->size);
- err = b43_write_initvals(dev, ivals, count,
+ return b43_write_initvals(dev, ivals, count,
fw->initvals.data->size - hdr_len);
- if (err)
- goto out;
- if (fw->initvals_band.data) {
- hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
- ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
- count = be32_to_cpu(hdr->size);
- err = b43_write_initvals(dev, ivals, count,
- fw->initvals_band.data->size - hdr_len);
- if (err)
- goto out;
- }
-out:
+}
- return err;
+static int b43_upload_initvals_band(struct b43_wldev *dev)
+{
+ const size_t hdr_len = sizeof(struct b43_fw_header);
+ const struct b43_fw_header *hdr;
+ struct b43_firmware *fw = &dev->fw;
+ const struct b43_iv *ivals;
+ size_t count;
+
+ if (!fw->initvals_band.data)
+ return 0;
+
+ hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data);
+ ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len);
+ count = be32_to_cpu(hdr->size);
+ return b43_write_initvals(dev, ivals, count,
+ fw->initvals_band.data->size - hdr_len);
}
/* Initialize the GPIOs
* http://bcm-specs.sipsolutions.net/GPIO
*/
+
+#ifdef CONFIG_B43_SSB
static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
{
struct ssb_bus *bus = dev->dev->sdev->bus;
@@ -2740,10 +2739,13 @@ static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
return bus->chipco.dev;
#endif
}
+#endif
static int b43_gpio_init(struct b43_wldev *dev)
{
+#ifdef CONFIG_B43_SSB
struct ssb_device *gpiodev;
+#endif
u32 mask, set;
b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_GPOUTSMSK, 0);
@@ -2802,7 +2804,9 @@ static int b43_gpio_init(struct b43_wldev *dev)
/* Turn off all GPIO stuff. Call this on module unload, for example. */
static void b43_gpio_cleanup(struct b43_wldev *dev)
{
+#ifdef CONFIG_B43_SSB
struct ssb_device *gpiodev;
+#endif
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
@@ -3086,6 +3090,10 @@ static int b43_chip_init(struct b43_wldev *dev)
if (err)
goto err_gpio_clean;
+ err = b43_upload_initvals_band(dev);
+ if (err)
+ goto err_gpio_clean;
+
/* Turn the Analog on and initialize the PHY. */
phy->ops->switch_analog(dev, 1);
err = b43_phy_init(dev);
@@ -3685,37 +3693,6 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static void b43_put_phy_into_reset(struct b43_wldev *dev)
-{
- u32 tmp;
-
- switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
- case B43_BUS_BCMA:
- b43err(dev->wl,
- "Putting PHY into reset not supported on BCMA\n");
- break;
-#endif
-#ifdef CONFIG_B43_SSB
- case B43_BUS_SSB:
- tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
- tmp &= ~B43_TMSLOW_GMODE;
- tmp |= B43_TMSLOW_PHYRESET;
- tmp |= SSB_TMSLOW_FGC;
- ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
- msleep(1);
-
- tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
- tmp &= ~SSB_TMSLOW_FGC;
- tmp |= B43_TMSLOW_PHYRESET;
- ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
- msleep(1);
-
- break;
-#endif
- }
-}
-
static const char *band_to_string(enum ieee80211_band band)
{
switch (band) {
@@ -3731,94 +3708,75 @@ static const char *band_to_string(enum ieee80211_band band)
}
/* Expects wl->mutex locked */
-static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
+static int b43_switch_band(struct b43_wldev *dev,
+ struct ieee80211_channel *chan)
{
- struct b43_wldev *up_dev = NULL;
- struct b43_wldev *down_dev;
- struct b43_wldev *d;
- int err;
- bool uninitialized_var(gmode);
- int prev_status;
+ struct b43_phy *phy = &dev->phy;
+ bool gmode;
+ u32 tmp;
- /* Find a device and PHY which supports the band. */
- list_for_each_entry(d, &wl->devlist, list) {
- switch (chan->band) {
- case IEEE80211_BAND_5GHZ:
- if (d->phy.supports_5ghz) {
- up_dev = d;
- gmode = false;
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (d->phy.supports_2ghz) {
- up_dev = d;
- gmode = true;
- }
- break;
- default:
- B43_WARN_ON(1);
- return -EINVAL;
- }
- if (up_dev)
- break;
+ switch (chan->band) {
+ case IEEE80211_BAND_5GHZ:
+ gmode = false;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ gmode = true;
+ break;
+ default:
+ B43_WARN_ON(1);
+ return -EINVAL;
}
- if (!up_dev) {
- b43err(wl, "Could not find a device for %s-GHz band operation\n",
+
+ if (!((gmode && phy->supports_2ghz) ||
+ (!gmode && phy->supports_5ghz))) {
+ b43err(dev->wl, "This device doesn't support %s-GHz band\n",
band_to_string(chan->band));
return -ENODEV;
}
- if ((up_dev == wl->current_dev) &&
- (!!wl->current_dev->phy.gmode == !!gmode)) {
+
+ if (!!phy->gmode == !!gmode) {
/* This device is already running. */
return 0;
}
- b43dbg(wl, "Switching to %s-GHz band\n",
+
+ b43dbg(dev->wl, "Switching to %s GHz band\n",
band_to_string(chan->band));
- down_dev = wl->current_dev;
- prev_status = b43_status(down_dev);
- /* Shutdown the currently running core. */
- if (prev_status >= B43_STAT_STARTED)
- down_dev = b43_wireless_core_stop(down_dev);
- if (prev_status >= B43_STAT_INITIALIZED)
- b43_wireless_core_exit(down_dev);
+ /* Some newer devices don't need the radio disabled for band switching */
+ if (!(phy->type == B43_PHYTYPE_N && phy->rev >= 3))
+ b43_software_rfkill(dev, true);
- if (down_dev != up_dev) {
- /* We switch to a different core, so we put PHY into
- * RESET on the old core. */
- b43_put_phy_into_reset(down_dev);
+ phy->gmode = gmode;
+ b43_phy_put_into_reset(dev);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ if (gmode)
+ tmp |= B43_BCMA_IOCTL_GMODE;
+ else
+ tmp &= ~B43_BCMA_IOCTL_GMODE;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ if (gmode)
+ tmp |= B43_TMSLOW_GMODE;
+ else
+ tmp &= ~B43_TMSLOW_GMODE;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ break;
+#endif
}
+ b43_phy_take_out_of_reset(dev);
- /* Now start the new core. */
- up_dev->phy.gmode = gmode;
- if (prev_status >= B43_STAT_INITIALIZED) {
- err = b43_wireless_core_init(up_dev);
- if (err) {
- b43err(wl, "Fatal: Could not initialize device for "
- "selected %s-GHz band\n",
- band_to_string(chan->band));
- goto init_failure;
- }
- }
- if (prev_status >= B43_STAT_STARTED) {
- err = b43_wireless_core_start(up_dev);
- if (err) {
- b43err(wl, "Fatal: Could not start device for "
- "selected %s-GHz band\n",
- band_to_string(chan->band));
- b43_wireless_core_exit(up_dev);
- goto init_failure;
- }
- }
- B43_WARN_ON(b43_status(up_dev) != prev_status);
+ b43_upload_initvals_band(dev);
- wl->current_dev = up_dev;
+ b43_phy_init(dev);
return 0;
-init_failure:
- /* Whoops, failed to init the new core. No core is operating now. */
- wl->current_dev = NULL;
- return err;
}
/* Write the short and long frame retry limit values. */
@@ -3851,8 +3809,10 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
dev = wl->current_dev;
+ b43_mac_suspend(dev);
+
/* Switch the band (if necessary). This might change the active core. */
- err = b43_switch_band(wl, conf->chandef.chan);
+ err = b43_switch_band(dev, conf->chandef.chan);
if (err)
goto out_unlock_mutex;
@@ -3871,8 +3831,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
else
phy->is_40mhz = false;
- b43_mac_suspend(dev);
-
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
conf->long_frame_max_tx_count);
@@ -4582,8 +4540,12 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
struct ssb_bus *bus;
u32 tmp;
+#ifdef CONFIG_B43_SSB
if (dev->dev->bus_type != B43_BUS_SSB)
return;
+#else
+ return;
+#endif
bus = dev->dev->sdev->bus;
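
The #ifdef/#else pair above is needed because B43_BUS_SSB is only defined when CONFIG_B43_SSB is set; were the constant defined unconditionally, the guard could collapse into a single IS_ENABLED() test. A sketch of that alternative form (an assumption about the enum definition, not what the patch does):

	/* Sketch only: valid if B43_BUS_SSB were defined unconditionally,
	 * which it is not in this driver - hence the preprocessor guard. */
	if (!IS_ENABLED(CONFIG_B43_SSB) ||
	    dev->dev->bus_type != B43_BUS_SSB)
		return;
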
@@ -4738,7 +4700,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
}
if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)
hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */
-#ifdef CONFIG_SSB_DRIVER_PCICORE
+#if defined(CONFIG_B43_SSB) && defined(CONFIG_SSB_DRIVER_PCICORE)
if (dev->dev->bus_type == B43_BUS_SSB &&
dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
dev->dev->sdev->bus->pcicore.dev->id.revision <= 10)
@@ -5129,10 +5091,82 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
b43_phy_free(dev);
}
+static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
+ bool *have_5ghz_phy)
+{
+ u16 dev_id = 0;
+
+#ifdef CONFIG_B43_BCMA
+ if (dev->dev->bus_type == B43_BUS_BCMA &&
+ dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI)
+ dev_id = dev->dev->bdev->bus->host_pci->device;
+#endif
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
+ dev_id = dev->dev->sdev->bus->host_pci->device;
+#endif
+ /* Override with SPROM value if available */
+ if (dev->dev->bus_sprom->dev_id)
+ dev_id = dev->dev->bus_sprom->dev_id;
+
+ /* Note: the IDs below can be "virtual" (not matching e.g. the real PCI ID) */
+ switch (dev_id) {
+ case 0x4324: /* BCM4306 */
+ case 0x4312: /* BCM4311 */
+ case 0x4319: /* BCM4318 */
+ case 0x4328: /* BCM4321 */
+ case 0x432b: /* BCM4322 */
+ case 0x4350: /* BCM43222 */
+ case 0x4353: /* BCM43224 */
+ case 0x0576: /* BCM43224 */
+ case 0x435f: /* BCM6362 */
+ case 0x4331: /* BCM4331 */
+ case 0x4359: /* BCM43228 */
+ case 0x43a0: /* BCM4360 */
+ case 0x43b1: /* BCM4352 */
+ /* Dual band devices */
+ *have_2ghz_phy = true;
+ *have_5ghz_phy = true;
+ return;
+ case 0x4321: /* BCM4306 */
+ case 0x4313: /* BCM4311 */
+ case 0x431a: /* BCM4318 */
+ case 0x432a: /* BCM4321 */
+ case 0x432d: /* BCM4322 */
+ case 0x4352: /* BCM43222 */
+ case 0x4333: /* BCM4331 */
+ case 0x43a2: /* BCM4360 */
+ case 0x43b3: /* BCM4352 */
+ /* 5 GHz only devices */
+ *have_2ghz_phy = false;
+ *have_5ghz_phy = true;
+ return;
+ }
+
+ /* As a fallback, try to guess using PHY type */
+ switch (dev->phy.type) {
+ case B43_PHYTYPE_A:
+ *have_2ghz_phy = false;
+ *have_5ghz_phy = true;
+ return;
+ case B43_PHYTYPE_G:
+ case B43_PHYTYPE_N:
+ case B43_PHYTYPE_LP:
+ case B43_PHYTYPE_HT:
+ case B43_PHYTYPE_LCN:
+ *have_2ghz_phy = true;
+ *have_5ghz_phy = false;
+ return;
+ }
+
+ B43_WARN_ON(1);
+}
+
static int b43_wireless_core_attach(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
- struct pci_dev *pdev = NULL;
+ struct b43_phy *phy = &dev->phy;
int err;
u32 tmp;
bool have_2ghz_phy = false, have_5ghz_phy = false;
@@ -5144,19 +5178,15 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
* that in core_init(), too.
*/
-#ifdef CONFIG_B43_SSB
- if (dev->dev->bus_type == B43_BUS_SSB &&
- dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
- pdev = dev->dev->sdev->bus->host_pci;
-#endif
-
err = b43_bus_powerup(dev, 0);
if (err) {
b43err(wl, "Bus powerup failed\n");
goto out;
}
- /* Get the PHY type. */
+ phy->do_full_init = true;
+
+ /* Guess the supported bands, as needed for the first init */
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
@@ -5178,51 +5208,31 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
+ /* Get the PHY type. */
err = b43_phy_versioning(dev);
if (err)
goto err_powerdown;
- /* Check if this device supports multiband. */
- if (!pdev ||
- (pdev->device != 0x4312 &&
- pdev->device != 0x4319 && pdev->device != 0x4324)) {
- /* No multiband support. */
- have_2ghz_phy = false;
+
+ /* Get real info about supported bands */
+ b43_supported_bands(dev, &have_2ghz_phy, &have_5ghz_phy);
+
+ /* We don't support 5 GHz on some PHYs yet */
+ switch (dev->phy.type) {
+ case B43_PHYTYPE_A:
+ case B43_PHYTYPE_N:
+ case B43_PHYTYPE_LP:
+ case B43_PHYTYPE_HT:
+ b43warn(wl, "5 GHz band is unsupported on this PHY\n");
have_5ghz_phy = false;
- switch (dev->phy.type) {
- case B43_PHYTYPE_A:
- have_5ghz_phy = true;
- break;
- case B43_PHYTYPE_LP: //FIXME not always!
-#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
- have_5ghz_phy = 1;
-#endif
- case B43_PHYTYPE_G:
- case B43_PHYTYPE_N:
- case B43_PHYTYPE_HT:
- case B43_PHYTYPE_LCN:
- have_2ghz_phy = true;
- break;
- default:
- B43_WARN_ON(1);
- }
}
- if (dev->phy.type == B43_PHYTYPE_A) {
- /* FIXME */
- b43err(wl, "IEEE 802.11a devices are unsupported\n");
+
+ if (!have_2ghz_phy && !have_5ghz_phy) {
+ b43err(wl, "b43 can't support any band on this device\n");
err = -EOPNOTSUPP;
goto err_powerdown;
}
- if (1 /* disable A-PHY */) {
- /* FIXME: For now we disable the A-PHY on multi-PHY devices. */
- if (dev->phy.type != B43_PHYTYPE_N &&
- dev->phy.type != B43_PHYTYPE_LP) {
- have_2ghz_phy = true;
- have_5ghz_phy = false;
- }
- }
err = b43_phy_allocate(dev);
if (err)
@@ -5270,7 +5280,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
- wl->nr_devs--;
b43_bus_set_wldev(dev, NULL);
kfree(wldev);
}
@@ -5295,8 +5304,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
if (err)
goto err_kfree_wldev;
- list_add(&wldev->list, &wl->devlist);
- wl->nr_devs++;
b43_bus_set_wldev(dev, wldev);
b43_debugfs_add_device(wldev);
@@ -5314,6 +5321,7 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
(pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \
(pdev->subsystem_device == _subdevice) )
+#ifdef CONFIG_B43_SSB
static void b43_sprom_fixup(struct ssb_bus *bus)
{
struct pci_dev *pdev;
@@ -5345,6 +5353,7 @@ static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
ssb_set_devtypedata(dev->sdev, NULL);
ieee80211_free_hw(hw);
}
+#endif
static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
{
@@ -5386,7 +5395,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl->hw = hw;
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
- INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5494,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
struct b43_bus_dev *dev;
struct b43_wl *wl;
int err;
- int first = 0;
dev = b43_bus_dev_ssb_init(sdev);
if (!dev)
return -ENOMEM;
wl = ssb_get_devtypedata(sdev);
- if (!wl) {
- /* Probing the first core. Must setup common struct b43_wl */
- first = 1;
- b43_sprom_fixup(sdev->bus);
- wl = b43_wireless_init(dev);
- if (IS_ERR(wl)) {
- err = PTR_ERR(wl);
- goto out;
- }
- ssb_set_devtypedata(sdev, wl);
- B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+ if (wl) {
+ b43err(NULL, "Dual-core devices are not supported\n");
+ err = -ENOTSUPP;
+ goto err_ssb_kfree_dev;
+ }
+
+ b43_sprom_fixup(sdev->bus);
+
+ wl = b43_wireless_init(dev);
+ if (IS_ERR(wl)) {
+ err = PTR_ERR(wl);
+ goto err_ssb_kfree_dev;
}
+ ssb_set_devtypedata(sdev, wl);
+ B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+
err = b43_one_core_attach(dev, wl);
if (err)
- goto err_wireless_exit;
+ goto err_ssb_wireless_exit;
/* setup and start work to load firmware */
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
- out:
return err;
- err_wireless_exit:
- if (first)
- b43_wireless_exit(dev, wl);
+err_ssb_wireless_exit:
+ b43_wireless_exit(dev, wl);
+err_ssb_kfree_dev:
+ kfree(dev);
return err;
}
@@ -5546,13 +5557,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
/* Unregister HW RNG driver */
b43_rng_exit(wl);
- if (list_empty(&wl->devlist)) {
- b43_leds_unregister(wl);
- /* Last core on the chip unregistered.
- * We can destroy common struct b43_wl.
- */
- b43_wireless_exit(dev, wl);
- }
+ b43_leds_unregister(wl);
+ b43_wireless_exit(dev, wl);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index dbaa51890198..08244b3b327e 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -96,12 +96,16 @@ int b43_phy_init(struct b43_wldev *dev)
phy->channel = ops->get_default_chan(dev);
- ops->software_rfkill(dev, false);
+ phy->ops->switch_analog(dev, true);
+ b43_software_rfkill(dev, false);
+
err = ops->init(dev);
if (err) {
b43err(dev->wl, "PHY init failed\n");
goto err_block_rf;
}
+ phy->do_full_init = false;
+
/* Make sure to switch hardware and firmware (SHM) to
* the default channel. */
err = b43_switch_channel(dev, ops->get_default_chan(dev));
@@ -113,10 +117,11 @@ int b43_phy_init(struct b43_wldev *dev)
return 0;
err_phy_exit:
+ phy->do_full_init = true;
if (ops->exit)
ops->exit(dev);
err_block_rf:
- ops->software_rfkill(dev, true);
+ b43_software_rfkill(dev, true);
return err;
}
@@ -125,7 +130,8 @@ void b43_phy_exit(struct b43_wldev *dev)
{
const struct b43_phy_operations *ops = dev->phy.ops;
- ops->software_rfkill(dev, true);
+ b43_software_rfkill(dev, true);
+ dev->phy.do_full_init = true;
if (ops->exit)
ops->exit(dev);
}
@@ -312,6 +318,90 @@ void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
}
}
+void b43_phy_put_into_reset(struct b43_wldev *dev)
+{
+ u32 tmp;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~B43_BCMA_IOCTL_GMODE;
+ tmp |= B43_BCMA_IOCTL_PHY_RESET;
+ tmp |= BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~B43_TMSLOW_GMODE;
+ tmp |= B43_TMSLOW_PHYRESET;
+ tmp |= SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ usleep_range(1000, 2000);
+
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ usleep_range(1000, 2000);
+
+ break;
+#endif
+ }
+}
+
+void b43_phy_take_out_of_reset(struct b43_wldev *dev)
+{
+ u32 tmp;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ /* Unset the reset bit (while forcing the clock) */
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~B43_BCMA_IOCTL_PHY_RESET;
+ tmp &= ~B43_BCMA_IOCTL_PHY_CLKEN;
+ tmp |= BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+
+ /* Stop forcing the clock */
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ tmp &= ~BCMA_IOCTL_FGC;
+ tmp |= B43_BCMA_IOCTL_PHY_CLKEN;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ udelay(1);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* Unset the reset bit (while forcing the clock) */
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~B43_TMSLOW_PHYRESET;
+ tmp &= ~B43_TMSLOW_PHYCLKEN;
+ tmp |= SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+ usleep_range(1000, 2000);
+
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~SSB_TMSLOW_FGC;
+ tmp |= B43_TMSLOW_PHYCLKEN;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ ssb_read32(dev->dev->sdev, SSB_TMSLOW); /* flush */
+ usleep_range(1000, 2000);
+ break;
+#endif
+ }
+}
+
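
Both bus paths of the two helpers above follow the same two-step handshake: change the reset/clock-enable bits with the force-gated-clock (FGC) bit set, wait, then read back and drop FGC. A bus-agnostic sketch of that pattern, with the accessors passed in as callbacks (all names here are illustrative, not driver API; delays vary by bus between udelay() and usleep_range() as seen above):

/* Sketch: one FGC-protected bit update. 'rd'/'wr' stand in for the bus
 * accessors (bcma_aread32/bcma_awrite32 or ssb_read32/ssb_write32). */
static void fgc_maskset(u32 (*rd)(void *ctx), void (*wr)(void *ctx, u32 val),
			void *ctx, u32 set, u32 clear, u32 fgc)
{
	u32 tmp;

	/* Step 1: apply the requested bits while forcing the clock */
	tmp = (rd(ctx) | set | fgc) & ~clear;
	wr(ctx, tmp);
	udelay(1);

	/* Step 2: stop forcing the clock, keep the new bit state */
	tmp = rd(ctx) & ~fgc;
	wr(ctx, tmp);
	udelay(1);
}
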
int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
{
struct b43_phy *phy = &(dev->phy);
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index f1b999349876..4ad6240d9ff4 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -231,9 +231,12 @@ struct b43_phy {
/* HT info */
bool is_40mhz;
- /* GMODE bit enabled? */
+ /* Is GMODE (2 GHz mode) bit enabled? */
bool gmode;
+ /* Full init has to be performed after a power reset */
+ bool do_full_init;
+
/* Analog Type */
u8 analog;
/* B43_PHYTYPE_ */
@@ -390,6 +393,9 @@ void b43_phy_lock(struct b43_wldev *dev);
*/
void b43_phy_unlock(struct b43_wldev *dev);
+void b43_phy_put_into_reset(struct b43_wldev *dev);
+void b43_phy_take_out_of_reset(struct b43_wldev *dev);
+
/**
* b43_switch_channel - Switch to another channel
*/
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 12f467b8d564..8f5c14bc10e6 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
static void b43_phy_initb6(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_radio_write16(dev, 0x50, 0x20);
}
if (phy->radio_rev <= 2) {
- b43_radio_write16(dev, 0x7C, 0x20);
+ b43_radio_write16(dev, 0x50, 0x20);
b43_radio_write16(dev, 0x5A, 0x70);
b43_radio_write16(dev, 0x5B, 0x7B);
b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_phy_write(dev, 0x2A, 0x8AC0);
b43_phy_write(dev, 0x0038, 0x0668);
b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
- if (phy->radio_rev <= 5) {
+ if (phy->radio_rev == 4 || phy->radio_rev == 5)
b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
- }
if (phy->radio_rev <= 2)
b43_radio_write16(dev, 0x005D, 0x000D);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 24ccbe96e0c8..86569f6a8705 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
}
}
+static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
+ enum n_intc_override intc_override,
+ u16 value, u8 core_sel)
+{
+ u16 reg, tmp, tmp2, val;
+ int core;
+
+ for (core = 0; core < 2; core++) {
+ if ((core_sel == 1 && core != 0) ||
+ (core_sel == 2 && core != 1))
+ continue;
+
+ reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+
+ switch (intc_override) {
+ case N_INTC_OVERRIDE_OFF:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case N_INTC_OVERRIDE_TRSW:
+ b43_phy_maskset(dev, reg, ~0xC0, value << 6);
+ b43_phy_set(dev, reg, 0x400);
+
+ b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
+ b43_phy_set(dev, 0x2ff, 0x2000);
+ b43_phy_set(dev, 0x2ff, 0x0001);
+ break;
+ case N_INTC_OVERRIDE_PA:
+ tmp = 0x0030;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ val = value << 5;
+ else
+ val = value << 4;
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_set(dev, reg, 0x1000);
+ break;
+ case N_INTC_OVERRIDE_EXT_LNA_PU:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ tmp2 = 0x0004;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ tmp2 = 0x0001;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_mask(dev, reg, ~tmp2);
+ break;
+ case N_INTC_OVERRIDE_EXT_LNA_GAIN:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ tmp2 = 0x0008;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ tmp2 = 0x0002;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ b43_phy_mask(dev, reg, ~tmp2);
+ break;
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
u8 i, j;
u16 reg, tmp, val;
+ if (dev->phy.rev >= 7) {
+ b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
+ core);
+ return;
+ }
+
B43_WARN_ON(dev->phy.rev < 3);
for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
static const u16 clip[] = { 0xFFFF, 0xFFFF };
if (nphy->deaf_count++ == 0) {
nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
- b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_classifier(dev, 0x7,
+ B43_NPHY_CLASSCTL_WAITEDEN);
b43_nphy_read_clip_detection(dev, nphy->clip_state);
b43_nphy_write_clip_detection(dev, clip);
}
@@ -627,13 +700,11 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);
- if (dev->phy.n->init_por) {
+ if (dev->phy.do_full_init) {
b43_radio_2057_rcal(dev);
b43_radio_2057_rccal(dev);
}
b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
-
- dev->phy.n->init_por = false;
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
@@ -734,9 +805,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
u16 bias, cbias;
u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
+ bool is_pkg_fab_smic;
B43_WARN_ON(dev->phy.rev < 3);
+ is_pkg_fab_smic =
+ ((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
+ dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
+
b43_chantab_radio_2056_upload(dev, e);
b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
@@ -744,7 +822,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
- if (dev->dev->chip_id == 0x4716) {
+ if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
} else {
@@ -752,6 +831,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
}
}
+ if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
+ }
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +853,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADG_IDAC, 0xcc);
- if (dev->dev->chip_id == 0x4716) {
+ if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
bias = 0x40;
cbias = 0x45;
pag_boost = 0x5;
@@ -776,6 +863,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
} else {
bias = 0x25;
cbias = 0x20;
+ if (is_pkg_fab_smic) {
+ bias = 0x2a;
+ cbias = 0x38;
+ }
pag_boost = 0x4;
pgag_boost = 0x03;
mixg_boost = 0x65;
@@ -844,6 +935,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
mixa_boost = 0xF;
}
+ cbias = is_pkg_fab_smic ? 0x35 : 0x30;
+
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
@@ -862,11 +955,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
b43_radio_write(dev,
offset | B2056_TX_PADA_CASCBIAS, 0x03);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+ offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+ offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
b43_radio_write(dev,
- offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+ offset | B2056_TX_INTPAA_CASCBIAS, cbias);
}
}
@@ -933,7 +1026,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
- if (dev->phy.n->init_por)
+ if (dev->phy.do_full_init)
b43_radio_2056_rcal(dev);
}
@@ -946,8 +1039,6 @@ static void b43_radio_init2056(struct b43_wldev *dev)
b43_radio_init2056_pre(dev);
b2056_upload_inittabs(dev, 0, 0);
b43_radio_init2056_post(dev);
-
- dev->phy.n->init_por = false;
}
/**************************************************
@@ -1164,23 +1255,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
u16 seq_mode;
u32 tmp;
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
+ b43_nphy_stay_in_carrier_search(dev, true);
if ((nphy->bb_mult_save & 0x80000000) == 0) {
tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
}
+ /* TODO: add modify_bbmult argument */
if (!dev->phy.is_40mhz)
tmp = 0x6464;
else
tmp = 0x4747;
b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-
b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
if (loops != 0xFFFF)
@@ -1213,6 +1301,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43err(dev->wl, "run samples timeout\n");
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+
+ b43_nphy_stay_in_carrier_search(dev, false);
}
/**************************************************
@@ -1588,8 +1678,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
struct b43_phy_n *nphy = dev->phy.n;
u16 saved_regs_phy_rfctl[2];
- u16 saved_regs_phy[13];
- u16 regs_to_store[] = {
+ u16 saved_regs_phy[22];
+ u16 regs_to_store_rev3[] = {
B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1688,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
};
+ u16 regs_to_store_rev7[] = {
+ B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+ B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+ B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+ 0x342, 0x343, 0x346, 0x347,
+ 0x2ff,
+ B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+ B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ 0x340, 0x341, 0x344, 0x345,
+ B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+ };
+ u16 *regs_to_store;
+ int regs_amount;
u16 class;
@@ -1617,6 +1721,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u8 rx_core_state;
int core, i, j, vcm;
+ if (dev->phy.rev >= 7) {
+ regs_to_store = regs_to_store_rev7;
+ regs_amount = ARRAY_SIZE(regs_to_store_rev7);
+ } else {
+ regs_to_store = regs_to_store_rev3;
+ regs_amount = ARRAY_SIZE(regs_to_store_rev3);
+ }
+ BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
+
class = b43_nphy_classifier(dev, 0, 0);
b43_nphy_classifier(dev, 7, 4);
b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1737,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ for (i = 0; i < regs_amount; i++)
saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
- b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+
+ if (dev->phy.rev >= 7) {
+ /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ } else {
+ }
} else {
- b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
- b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+ } else {
+ b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
+ b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+ }
}
rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1774,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
/* Grab RSSI results for every possible VCM */
for (vcm = 0; vcm < 8; vcm++) {
- b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
- vcm << 2);
+ if (dev->phy.rev >= 7)
+ ;
+ else
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+ 0xE3, vcm << 2);
b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
}
@@ -1682,8 +1805,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
/* Select the best VCM */
- b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
- vcm_final << 2);
+ if (dev->phy.rev >= 7)
+ ;
+ else
+ b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+ 0xE3, vcm_final << 2);
for (i = 0; i < 4; i++) {
if (core != i / 2)
@@ -1736,9 +1862,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
- b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+ for (i = 0; i < regs_amount; i++)
b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
@@ -2494,8 +2620,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* TX to RX */
- u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
/* RX to TX */
u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F };
@@ -2503,6 +2629,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+ u16 vmids[5][4] = {
+ { 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
+ { 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
+ { 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
+ { 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
+ { 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
+ };
+ u16 gains[5][4] = {
+ { 0x02, 0x02, 0x02, 0x00, }, /* 0 */
+ { 0x02, 0x02, 0x02, 0x02, }, /* 1 */
+ { 0x02, 0x02, 0x02, 0x04, }, /* 2 */
+ { 0x02, 0x02, 0x02, 0x00, }, /* 3 */
+ { 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
+ };
+ u16 *vmid, *gain;
+
+ u8 pdet_range;
u16 tmp16;
u32 tmp32;
@@ -2561,7 +2704,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
- /* TODO */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ pdet_range = sprom->fem.ghz2.pdet_range;
+ else
+ pdet_range = sprom->fem.ghz5.pdet_range;
+ vmid = vmids[min_t(u16, pdet_range, 4)];
+ gain = gains[min_t(u16, pdet_range, 4)];
+ switch (pdet_range) {
+ case 3:
+ if (!(dev->phy.rev >= 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ break;
+ /* FALL THROUGH */
+ case 0:
+ case 1:
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ case 2:
+ if (dev->phy.rev >= 6) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ vmid[3] = 0x94;
+ else
+ vmid[3] = 0x8e;
+ gain[3] = 3;
+ } else if (dev->phy.rev == 5) {
+ vmid[3] = 0x84;
+ gain[3] = 2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ case 4:
+ case 5:
+ if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+ if (pdet_range == 4) {
+ vmid[3] = 0x8e;
+ tmp16 = 0x96;
+ gain[3] = 0x2;
+ } else {
+ vmid[3] = 0x89;
+ tmp16 = 0x89;
+ gain[3] = 0;
+ }
+ } else {
+ if (pdet_range == 4) {
+ vmid[3] = 0x89;
+ tmp16 = 0x8b;
+ gain[3] = 0x2;
+ } else {
+ vmid[3] = 0x74;
+ tmp16 = 0x70;
+ gain[3] = 0;
+ }
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+ vmid[3] = tmp16;
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+ b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+ break;
+ }
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@@ -2600,7 +2807,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
/* Dropped probably-always-true condition */
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
- b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
+ b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3418,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
u8 idx, delta;
u8 i, stf_mode;
+ /* Array adj_pwr_tbl corresponds to the hardware table. It consists of
+ * 21 groups, each containing 4 entries.
+ *
+ * The first group holds the CCK entries.
+ * In each remaining group, the 4 entries correspond to the Tx modes
+ * (SISO, CDD, STBC, SDM).
+ *
+ * Group 0 is for CCK
+ * Groups 1..4 use BPSK (group per coding rate)
+ * Groups 5..8 use QPSK (group per coding rate)
+ * Groups 9..12 use 16-QAM (group per coding rate)
+ * Groups 13..16 use 64-QAM (group per coding rate)
+ * Groups 17..20 are unknown
+ */
+
for (i = 0; i < 4; i++)
nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
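
Given that layout, 21 groups of 4 entries fill exactly the 84 slots that the bulk writes re-enabled later in this patch upload, and the offset for a given group and Tx mode is a plain product. A sketch of the index math the comment implies (names are illustrative, not driver API):

/* Sketch: offset into adj_pwr_tbl. group 0 is CCK, groups 1..16 cover
 * BPSK/QPSK/16-QAM/64-QAM (one group per coding rate), tx_mode picks
 * SISO/CDD/STBC/SDM within the group. */
static inline unsigned int adj_pwr_index(unsigned int group,
					 unsigned int tx_mode)
{
	return group * 4 + tx_mode;	/* 21 * 4 = 84 entries total */
}
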
@@ -3409,10 +3630,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
}
b43_nphy_tx_prepare_adjusted_power_table(dev);
- /*
b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
- */
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5343,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
if (phy->rev >= 3 && phy->rev <= 6)
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
b43_nphy_tx_lp_fbw(dev);
if (phy->rev >= 3)
b43_nphy_spur_workaround(dev);
@@ -5338,7 +5557,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
nphy->spur_avoid = (phy->rev >= 3) ?
B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
- nphy->init_por = true;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -5379,8 +5597,6 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
}
-
- nphy->init_por = true;
}
static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -5441,8 +5657,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
/* Register 1 is a 32-bit register. */
B43_WARN_ON(reg == 1);
- /* N-PHY needs 0x100 for read access */
- reg |= 0x100;
+
+ if (dev->phy.rev >= 7)
+ reg |= 0x200; /* Radio 0x2057 */
+ else
+ reg |= 0x100;
b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
@@ -5488,10 +5707,12 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
} else {
if (dev->phy.rev >= 7) {
- b43_radio_2057_init(dev);
+ if (!dev->phy.radio_on)
+ b43_radio_2057_init(dev);
b43_switch_channel(dev, dev->phy.channel);
} else if (dev->phy.rev >= 3) {
- b43_radio_init2056(dev);
+ if (!dev->phy.radio_on)
+ b43_radio_init2056(dev);
b43_switch_channel(dev, dev->phy.channel);
} else {
b43_radio_init2055(dev);
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 9a5b6bc27d24..ecfbf66dbc3b 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -931,7 +931,6 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
- bool init_por;
bool gain_boost;
bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index b4fd9345d673..2ce25607c60d 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
unsigned int rx_length;
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
[B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
[B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
[B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
[B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
[B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
};
-static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
[B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
[B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
-#define INITTABSPTS(prefix) \
- .syn = prefix##_syn, \
- .syn_length = ARRAY_SIZE(prefix##_syn), \
- .tx = prefix##_tx, \
- .tx_length = ARRAY_SIZE(prefix##_tx), \
- .rx = prefix##_rx, \
- .rx_length = ARRAY_SIZE(prefix##_rx)
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+};
-static const struct b2056_inittabs_pts b2056_inittabs[] = {
- [3] = { INITTABSPTS(b2056_inittab_rev3) },
- [4] = { INITTABSPTS(b2056_inittab_rev4) },
- [5] = { INITTABSPTS(b2056_inittab_rev5) },
- [6] = { INITTABSPTS(b2056_inittab_rev6) },
- [7] = { INITTABSPTS(b2056_inittab_rev7) },
- [8] = { INITTABSPTS(b2056_inittab_rev8) },
- [9] = { INITTABSPTS(b2056_inittab_rev7) },
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
};
+#define INITTABSPTS(prefix) \
+ static const struct b2056_inittabs_pts prefix = { \
+ .syn = prefix##_syn, \
+ .syn_length = ARRAY_SIZE(prefix##_syn), \
+ .tx = prefix##_tx, \
+ .tx_length = ARRAY_SIZE(prefix##_tx), \
+ .rx = prefix##_rx, \
+ .rx_length = ARRAY_SIZE(prefix##_rx), \
+ }
+
+INITTABSPTS(b2056_inittab_phy_rev3);
+INITTABSPTS(b2056_inittab_phy_rev4);
+INITTABSPTS(b2056_inittab_radio_rev5);
+INITTABSPTS(b2056_inittab_radio_rev6);
+INITTABSPTS(b2056_inittab_radio_rev7_9);
+INITTABSPTS(b2056_inittab_radio_rev8);
+INITTABSPTS(b2056_inittab_radio_rev11);
+
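
The reworked INITTABSPTS() now declares the complete pts struct from a single prefix, replacing the old rev-indexed array. For example, INITTABSPTS(b2056_inittab_phy_rev3) expands to:

static const struct b2056_inittabs_pts b2056_inittab_phy_rev3 = {
	.syn = b2056_inittab_phy_rev3_syn,
	.syn_length = ARRAY_SIZE(b2056_inittab_phy_rev3_syn),
	.tx = b2056_inittab_phy_rev3_tx,
	.tx_length = ARRAY_SIZE(b2056_inittab_phy_rev3_tx),
	.rx = b2056_inittab_phy_rev3_rx,
	.rx_length = ARRAY_SIZE(b2056_inittab_phy_rev3_rx),
};
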
#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
.phy_regs.phy_bw6 = r5
/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
},
};
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
{ .freq = 4920,
RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -9011,6 +9056,1154 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
},
};
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
+ {
+ .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ {
+ .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ {
+ .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ {
+ .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ {
+ .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ {
+ .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ {
+ .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ {
+ .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ {
+ .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ {
+ .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ {
+ .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ {
+ .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ {
+ .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ {
+ .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ {
+ .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ {
+ .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ {
+ .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ {
+ .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ {
+ .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ {
+ .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ {
+ .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ {
+ .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ {
+ .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ {
+ .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ {
+ .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ {
+ .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ {
+ .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ {
+ .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ {
+ .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ {
+ .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ {
+ .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ {
+ .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ {
+ .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ {
+ .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ {
+ .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ {
+ .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ {
+ .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ {
+ .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ {
+ .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ {
+ .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ {
+ .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ {
+ .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ {
+ .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ {
+ .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ {
+ .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ {
+ .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ {
+ .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ {
+ .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ {
+ .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ {
+ .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ {
+ .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ {
+ .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ {
+ .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ {
+ .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ {
+ .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ {
+ .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ {
+ .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ {
+ .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ {
+ .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ {
+ .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ {
+ .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ {
+ .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ {
+ .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ {
+ .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ {
+ .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ {
+ .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ {
+ .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ {
+ .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ {
+ .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ {
+ .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ {
+ .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ {
+ .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ {
+ .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ {
+ .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ {
+ .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ {
+ .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ {
+ .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ {
+ .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ {
+ .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ {
+ .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ {
+ .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ {
+ .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ {
+ .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ {
+ .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ {
+ .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ {
+ .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ {
+ .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ {
+ .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ {
+ .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ {
+ .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ {
+ .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ {
+ .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ {
+ .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ {
+ .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ {
+ .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ {
+ .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ {
+ .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ {
+ .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ {
+ .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ {
+ .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ {
+ .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
+ 0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ {
+ .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ {
+ .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ {
+ .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ {
+ .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ {
+ .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ {
+ .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ {
+ .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ {
+ .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ {
+ .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x02,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ {
+ .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ {
+ .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ {
+ .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ {
+ .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ {
+ .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ {
+ .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ {
+ .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ {
+ .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ {
+ .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ {
+ .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ {
+ .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ {
+ .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ {
+ .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ {
+ .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
+ 0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b2056_inittabs_pts
+*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ switch (dev->phy.rev) {
+ case 3:
+ return &b2056_inittab_phy_rev3;
+ case 4:
+ return &b2056_inittab_phy_rev4;
+ default:
+ switch (phy->radio_rev) {
+ case 5:
+ return &b2056_inittab_radio_rev5;
+ case 6:
+ return &b2056_inittab_radio_rev6;
+ case 7:
+ case 9:
+ return &b2056_inittab_radio_rev7_9;
+ case 8:
+ return &b2056_inittab_radio_rev8;
+ case 11:
+ return &b2056_inittab_radio_rev11;
+ }
+ }
+
+ return NULL;
+}
+
static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
bool ignore_uploadflag, u16 routing,
const struct b2056_inittab_entry *e,
@@ -9037,11 +10230,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
{
const struct b2056_inittabs_pts *pts;
- if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ pts = b43_nphy_get_inittabs_rev3(dev);
+ if (!pts) {
B43_WARN_ON(1);
return;
}
- pts = &b2056_inittabs[dev->phy.rev];
b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +10253,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b2056_inittabs_pts *pts;
const struct b2056_inittab_entry *e;
- if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ pts = b43_nphy_get_inittabs_rev3(dev);
+ if (!pts) {
B43_WARN_ON(1);
return;
}
- pts = &b2056_inittabs[dev->phy.rev];
+
e = &pts->syn[B2056_SYN_PLL_CP2];
b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +10267,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
+ struct b43_phy *phy = &dev->phy;
const struct b43_nphy_channeltab_entry_rev3 *e;
unsigned int length, i;
- switch (dev->phy.rev) {
+ switch (phy->rev) {
case 3:
- e = b43_nphy_channeltab_rev3;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+ e = b43_nphy_channeltab_phy_rev3;
+ length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
break;
case 4:
- e = b43_nphy_channeltab_rev4;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
- break;
- case 5:
- e = b43_nphy_channeltab_rev5;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
- break;
- case 6:
- e = b43_nphy_channeltab_rev6;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
- break;
- case 7:
- case 9:
- e = b43_nphy_channeltab_rev7_9;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
- break;
- case 8:
- e = b43_nphy_channeltab_rev8;
- length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+ e = b43_nphy_channeltab_phy_rev4;
+ length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
break;
default:
- B43_WARN_ON(1);
- return NULL;
+ switch (phy->radio_rev) {
+ case 5:
+ e = b43_nphy_channeltab_radio_rev5;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+ break;
+ case 6:
+ e = b43_nphy_channeltab_radio_rev6;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+ break;
+ case 7:
+ case 9:
+ e = b43_nphy_channeltab_radio_rev7_9;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+ break;
+ case 8:
+ e = b43_nphy_channeltab_radio_rev8;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+ break;
+ case 11:
+ e = b43_nphy_channeltab_radio_rev11;
+ length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+ break;
+ default:
+ B43_WARN_ON(1);
+ return NULL;
+ }
}
for (i = 0; i < length; i++, e++) {
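[Editor's note] The hunk above replaces the old flat switch on dev->phy.rev with a two-level dispatch: PHY revisions 3 and 4 keep their own channel tables, and every newer PHY falls through to a switch on the radio revision instead. Below is a minimal, compilable sketch of that lookup pattern; the table names and the standalone lookup() driver are illustrative stand-ins, not the kernel symbols.

    #include <stddef.h>
    #include <stdio.h>

    struct chantab { const char *name; size_t len; };

    /* hypothetical per-revision tables */
    static const struct chantab tab_phy_rev3    = { "phy rev 3",   10 };
    static const struct chantab tab_phy_rev4    = { "phy rev 4",   12 };
    static const struct chantab tab_radio_rev8  = { "radio rev 8",  80 };
    static const struct chantab tab_radio_rev11 = { "radio rev 11", 88 };

    /* PHY revs 3/4 map directly to a table; anything newer is keyed by
     * the radio revision, mirroring the dispatch the patch introduces. */
    static const struct chantab *lookup(int phy_rev, int radio_rev)
    {
            switch (phy_rev) {
            case 3: return &tab_phy_rev3;
            case 4: return &tab_phy_rev4;
            default:
                    switch (radio_rev) {
                    case 8:  return &tab_radio_rev8;
                    case 11: return &tab_radio_rev11;
                    default: return NULL; /* caller warns and bails out */
                    }
            }
    }

    int main(void)
    {
            const struct chantab *t = lookup(7, 11);
            printf("%s\n", t ? t->name : "unsupported");
            return 0;
    }

Returning NULL lets both callers in the patch (the inittab upload paths and the channel-table lookup) share one unsupported-hardware bail-out instead of each bounds-checking an array indexed by PHY revision.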
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 94c755fdda14..4047c05e3807 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
};
-static const u32 b43_ntab_noisevar0_r3[] = {
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
- 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-};
-
-static const u32 b43_ntab_noisevar1_r3[] = {
+static const u32 b43_ntab_noisevar_r3[] = {
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3109,31 +3042,32 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
antswlut = sprom->fem.ghz2.antswlut;
/* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
- ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
- ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
- ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
- ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
- ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
- ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
- ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
- ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
- ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
- ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
- ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
- ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
- ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
- ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
- ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
- ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
- ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
- ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
- ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
- ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
- ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
- ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+ if (dev->phy.do_full_init) {
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+ ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+ ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+ ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+ ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+ ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
+ ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+ ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+ ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+ ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+ ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+ ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+ ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+ ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+ ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
+ ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
+ ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+ ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+ ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+ ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+ ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+ ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+ }
/* Volatile tables */
if (antswlut < ARRAY_SIZE(b43_ntab_antswctl_r3))
@@ -3146,20 +3080,22 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
{
/* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
- ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
- ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
- ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
- ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
- ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
- ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
- ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
- ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
- ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
- ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
- ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ if (dev->phy.do_full_init) {
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+ ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+ ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+ ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+ ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+ ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+ ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+ ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+ ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+ ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+ ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+ ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+ ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+ ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ }
/* Volatile tables */
ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
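[Editor's note] Both table-init hunks above wrap the static-table uploads in a do_full_init check, so a fast reinit (for example a band or channel switch) skips re-pushing large constant tables the hardware already holds, while volatile tables are still uploaded every time. A compilable sketch of that gating, with stand-in names rather than the b43 API:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state { bool do_full_init; };

    static void upload(const char *table)
    {
            printf("uploading %s\n", table);
    }

    static void tables_init(struct dev_state *s)
    {
            /* Static tables: constant contents, only worth uploading
             * on a full initialization. */
            if (s->do_full_init) {
                    upload("framestruct");
                    upload("noisevar");
            }
            /* Volatile tables are (re)uploaded unconditionally. */
            upload("antswctl");
    }

    int main(void)
    {
            struct dev_state fast = { .do_full_init = false };
            tables_init(&fast);     /* only the volatile table prints */
            return 0;
    }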
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9ff33adcff89..3a58aee4c4cf 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
-#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
+#define B43_NTAB_NOISEVAR_R3 B43_NTAB32(16, 0) /* noise variance */
#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 9b1a038be08b..c218c08fb2f5 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
{
- b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
+ b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
}
static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 31adb8cf0291..4f38f19b8e3d 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -408,7 +408,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
mac_ctl |= B43_TXH_MAC_HWSEQ;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
mac_ctl |= B43_TXH_MAC_STMSDU;
- if (phy->type == B43_PHYTYPE_A)
+ if (!phy->gmode)
mac_ctl |= B43_TXH_MAC_5GHZ;
/* Overwrite rates[0].count to make the retry calculation
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 1d2ceac3a221..98e67c18f276 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -33,7 +33,7 @@ brcmfmac-objs += \
bcdc.o \
dhd_common.o \
dhd_linux.o \
- nvram.o \
+ firmware.o \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 939d6b132922..16f9ab2568a8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -186,7 +186,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
bool success);
/* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index c4535616064e..7735328fff21 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -63,7 +63,6 @@ struct brcmf_bus_dcmd {
*/
struct brcmf_bus_ops {
int (*preinit)(struct device *dev);
- int (*init)(struct device *dev);
void (*stop)(struct device *dev);
int (*txdata)(struct device *dev, struct sk_buff *skb);
int (*txctl)(struct device *dev, unsigned char *msg, uint len);
@@ -99,6 +98,7 @@ struct brcmf_bus {
unsigned long tx_realloc;
u32 chip;
u32 chiprev;
+ bool always_use_fws_queue;
struct brcmf_bus_ops *ops;
};
@@ -113,11 +113,6 @@ static inline int brcmf_bus_preinit(struct brcmf_bus *bus)
return bus->ops->preinit(bus->dev);
}
-static inline int brcmf_bus_init(struct brcmf_bus *bus)
-{
- return bus->ops->init(bus->dev);
-}
-
static inline void brcmf_bus_stop(struct brcmf_bus *bus)
{
bus->ops->stop(bus->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 6a8983a1fb9c..ed3e32ce8c23 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,6 +32,9 @@
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
+/* boost value for RSSI_DELTA in preferred join selection */
+#define BRCMF_JOIN_PREF_RSSI_BOOST 8
+
bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
@@ -246,6 +249,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
+ struct brcmf_join_pref_params join_pref_params[2];
char *ptr;
s32 err;
@@ -298,6 +302,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
goto done;
}
+	/* Setup join_pref to select target by RSSI (with boost on 5GHz) */
+ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ join_pref_params[0].len = 2;
+ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+ join_pref_params[0].band = WLC_BAND_5G;
+ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[1].len = 2;
+ join_pref_params[1].rssi_gain = 0;
+ join_pref_params[1].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+
/* Setup event_msgs, enable E_IF */
err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
BRCMF_EVENTING_MASK_LEN);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 7d28cd385092..09dd8c13d844 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -190,7 +190,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- struct ethhdr *eh;
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
@@ -236,6 +236,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
goto done;
}
+ if (eh->h_proto == htons(ETH_P_PAE))
+ atomic_inc(&ifp->pend_8021x_cnt);
+
ret = brcmf_fws_process_skb(ifp, skb);
done:
@@ -538,31 +541,26 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
brcmf_netif_rx(ifp, skb);
}
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
bool success)
{
struct brcmf_if *ifp;
struct ethhdr *eh;
- u8 ifidx;
u16 type;
- int res;
-
- res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
ifp = drvr->iflist[ifidx];
if (!ifp)
goto done;
- if (res == 0) {
- eh = (struct ethhdr *)(txp->data);
- type = ntohs(eh->h_proto);
+ eh = (struct ethhdr *)(txp->data);
+ type = ntohs(eh->h_proto);
- if (type == ETH_P_PAE) {
- atomic_dec(&ifp->pend_8021x_cnt);
- if (waitqueue_active(&ifp->pend_8021x_wait))
- wake_up(&ifp->pend_8021x_wait);
- }
+ if (type == ETH_P_PAE) {
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
}
+
if (!success)
ifp->stats.tx_errors++;
done:
@@ -573,13 +571,17 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ u8 ifidx;
/* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(drvr->fws)) {
if (!success)
brcmf_fws_bustxfail(drvr->fws, txp);
} else {
- brcmf_txfinalize(drvr, txp, success);
+ if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
+ brcmu_pkt_buf_free_skb(txp);
+ else
+ brcmf_txfinalize(drvr, txp, ifidx, success);
}
}
@@ -914,13 +916,6 @@ int brcmf_bus_start(struct device *dev)
brcmf_dbg(TRACE, "\n");
- /* Bring up the bus */
- ret = brcmf_bus_init(bus_if);
- if (ret != 0) {
- brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
- return ret;
- }
-
/* add primary networking interface */
ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
if (IS_ERR(ifp))
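
[Editor's note] The hunks above move 802.1X accounting out of fwsignal and into the transmit/finalize pair: the counter is bumped when an EAPOL frame enters the driver and dropped when its transmit status is finalized, whichever path completed it. A standalone model of that pattern, with hypothetical names and C11 atomics standing in for kernel atomics and waitqueues:

#include <stdatomic.h>
#include <stdio.h>

#define ETH_P_PAE 0x888E	/* EAPOL / 802.1X key frames */

static atomic_int pend_8021x_cnt;

static void xmit(unsigned short h_proto)
{
	if (h_proto == ETH_P_PAE)
		atomic_fetch_add(&pend_8021x_cnt, 1);
	/* ... hand the frame to the bus or firmware-signalling layer ... */
}

static void tx_finalize(unsigned short h_proto)
{
	/* single decrement point: covers success, drop, and error paths */
	if (h_proto == ETH_P_PAE &&
	    atomic_fetch_sub(&pend_8021x_cnt, 1) == 1)
		printf("last pending EAPOL done, wake waiters\n");
}

int main(void)
{
	xmit(ETH_P_PAE);
	xmit(0x0800);		/* IPv4, not counted */
	tx_finalize(ETH_P_PAE);
	return 0;
}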
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 13c89a0c4ba7..8fa0dbbbda72 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -42,7 +42,7 @@
#include <soc.h>
#include "sdio_host.h"
#include "chip.h"
-#include "nvram.h"
+#include "firmware.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -632,43 +632,28 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
};
-
-static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
- enum brcmf_firmware_type type)
+static const char *brcmf_sdio_get_fwname(struct brcmf_chip *ci,
+ enum brcmf_firmware_type type)
{
- const struct firmware *fw;
- const char *name;
- int err, i;
+ int i;
for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
- if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
- brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+ if (brcmf_fwname_data[i].chipid == ci->chip &&
+ brcmf_fwname_data[i].revmsk & BIT(ci->chiprev)) {
switch (type) {
case BRCMF_FIRMWARE_BIN:
- name = brcmf_fwname_data[i].bin;
- break;
+ return brcmf_fwname_data[i].bin;
case BRCMF_FIRMWARE_NVRAM:
- name = brcmf_fwname_data[i].nv;
- break;
+ return brcmf_fwname_data[i].nv;
default:
brcmf_err("invalid firmware type (%d)\n", type);
return NULL;
}
- goto found;
}
}
brcmf_err("Unknown chipid %d [%d]\n",
- bus->ci->chip, bus->ci->chiprev);
+ ci->chip, ci->chiprev);
return NULL;
-
-found:
- err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
- if ((err) || (!fw)) {
- brcmf_err("fail to request firmware %s (%d)\n", name, err);
- return NULL;
- }
-
- return fw;
}
static void pkt_align(struct sk_buff *p, int len, int align)
@@ -3278,20 +3263,13 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
}
static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
- const struct firmware *nv)
+ void *vars, u32 varsz)
{
- void *vars;
- u32 varsz;
int address;
int err;
brcmf_dbg(TRACE, "Enter\n");
- vars = brcmf_nvram_strip(nv, &varsz);
-
- if (vars == NULL)
- return -EINVAL;
-
address = bus->ci->ramsize - varsz + bus->ci->rambase;
err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
if (err)
@@ -3300,15 +3278,14 @@ static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
err = -EIO;
- brcmf_nvram_free(vars);
-
return err;
}
-static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
+static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
+ const struct firmware *fw,
+ void *nvram, u32 nvlen)
{
int bcmerror = -EFAULT;
- const struct firmware *fw;
u32 rstvec;
sdio_claim_host(bus->sdiodev->func[1]);
@@ -3317,12 +3294,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
/* Keep arm in reset */
brcmf_chip_enter_download(bus->ci);
- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
- if (fw == NULL) {
- bcmerror = -ENOENT;
- goto err;
- }
-
rstvec = get_unaligned_le32(fw->data);
brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
@@ -3330,17 +3301,12 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
release_firmware(fw);
if (bcmerror) {
brcmf_err("dongle image file download failed\n");
+ brcmf_fw_nvram_free(nvram);
goto err;
}
- fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
- if (fw == NULL) {
- bcmerror = -ENOENT;
- goto err;
- }
-
- bcmerror = brcmf_sdio_download_nvram(bus, fw);
- release_firmware(fw);
+ bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
+ brcmf_fw_nvram_free(nvram);
if (bcmerror) {
brcmf_err("dongle nvram file download failed\n");
goto err;
@@ -3490,97 +3456,6 @@ done:
return err;
}
-static int brcmf_sdio_bus_init(struct device *dev)
-{
- struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- struct brcmf_sdio *bus = sdiodev->bus;
- int err, ret = 0;
- u8 saveclk;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* try to download image and nvram to the dongle */
- if (bus_if->state == BRCMF_BUS_DOWN) {
- bus->alp_only = true;
- err = brcmf_sdio_download_firmware(bus);
- if (err)
- return err;
- bus->alp_only = false;
- }
-
- if (!bus->sdiodev->bus_if->drvr)
- return 0;
-
- /* Start the watchdog timer */
- bus->sdcnt.tickcnt = 0;
- brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
-
- sdio_claim_host(bus->sdiodev->func[1]);
-
- /* Make sure backplane clock is on, needed to generate F2 interrupt */
- brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
- if (bus->clkstate != CLK_AVAIL)
- goto exit;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdiod_regrb(bus->sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err) {
- brcmf_err("Failed to force clock for F2: err %d\n", err);
- goto exit;
- }
-
- /* Enable function 2 (frame transfers) */
- w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata));
- err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);
-
-
- brcmf_dbg(INFO, "enable F2: err=%d\n", err);
-
- /* If F2 successfully enabled, set core and enable interrupts */
- if (!err) {
- /* Set up the interrupt mask and enable interrupts */
- bus->hostintmask = HOSTINTMASK;
- w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask));
-
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
- } else {
- /* Disable F2 again */
- sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
- ret = -ENODEV;
- }
-
- if (brcmf_chip_sr_capable(bus->ci)) {
- brcmf_sdio_sr_init(bus);
- } else {
- /* Restore previous clock setting */
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- saveclk, &err);
- }
-
- if (ret == 0) {
- ret = brcmf_sdiod_intr_register(bus->sdiodev);
- if (ret != 0)
- brcmf_err("intr register failed:%d\n", ret);
- }
-
- /* If we didn't come up, turn off backplane clock */
- if (ret != 0)
- brcmf_sdio_clkctl(bus, CLK_NONE, false);
-
-exit:
- sdio_release_host(bus->sdiodev->func[1]);
-
- return ret;
-}
-
void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4020,13 +3895,114 @@ brcmf_sdio_watchdog(unsigned long data)
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.stop = brcmf_sdio_bus_stop,
.preinit = brcmf_sdio_bus_preinit,
- .init = brcmf_sdio_bus_init,
.txdata = brcmf_sdio_bus_txdata,
.txctl = brcmf_sdio_bus_txctl,
.rxctl = brcmf_sdio_bus_rxctl,
.gettxq = brcmf_sdio_bus_gettxq,
};
+static void brcmf_sdio_firmware_callback(struct device *dev,
+ const struct firmware *code,
+ void *nvram, u32 nvram_len)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ struct brcmf_sdio *bus = sdiodev->bus;
+ int err = 0;
+ u8 saveclk;
+
+ brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev));
+
+ /* try to download image and nvram to the dongle */
+ if (bus_if->state == BRCMF_BUS_DOWN) {
+ bus->alp_only = true;
+ err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
+ if (err)
+ goto fail;
+ bus->alp_only = false;
+ }
+
+ if (!bus_if->drvr)
+ return;
+
+ /* Start the watchdog timer */
+ bus->sdcnt.tickcnt = 0;
+ brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
+
+ sdio_claim_host(sdiodev->func[1]);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
+ if (bus->clkstate != CLK_AVAIL)
+ goto release;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ brcmf_err("Failed to force clock for F2: err %d\n", err);
+ goto release;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata));
+ err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+
+
+ brcmf_dbg(INFO, "enable F2: err=%d\n", err);
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (!err) {
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ w_sdreg32(bus, bus->hostintmask,
+ offsetof(struct sdpcmd_regs, hostintmask));
+
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+ } else {
+ /* Disable F2 again */
+ sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+ goto release;
+ }
+
+ if (brcmf_chip_sr_capable(bus->ci)) {
+ brcmf_sdio_sr_init(bus);
+ } else {
+ /* Restore previous clock setting */
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ saveclk, &err);
+ }
+
+ if (err == 0) {
+ err = brcmf_sdiod_intr_register(sdiodev);
+ if (err != 0)
+ brcmf_err("intr register failed:%d\n", err);
+ }
+
+ /* If we didn't come up, turn off backplane clock */
+ if (err != 0)
+ brcmf_sdio_clkctl(bus, CLK_NONE, false);
+
+ sdio_release_host(sdiodev->func[1]);
+
+ err = brcmf_bus_start(dev);
+ if (err != 0) {
+ brcmf_err("dongle is not responding\n");
+ goto fail;
+ }
+ return;
+
+release:
+ sdio_release_host(sdiodev->func[1]);
+fail:
+ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
+ device_release_driver(dev);
+}
+
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret;
@@ -4110,8 +4086,13 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail;
}
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+
/* Allocate buffers */
if (bus->sdiodev->bus_if->maxctl) {
+ bus->sdiodev->bus_if->maxctl += bus->roundup;
bus->rxblen =
roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
ALIGNMENT) + bus->head_align;
@@ -4139,10 +4120,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->idletime = BRCMF_IDLE_INTERVAL;
bus->idleclock = BRCMF_IDLE_ACTIVE;
- /* Query the F2 block size, set roundup accordingly */
- bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
- bus->roundup = min(max_roundup, bus->blocksize);
-
/* SR state */
bus->sleeping = false;
bus->sr_enabled = false;
@@ -4150,10 +4127,14 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
- /* if firmware path present try to download and bring up bus */
- ret = brcmf_bus_start(bus->sdiodev->dev);
+ ret = brcmf_fw_get_firmwares(sdiodev->dev, BRCMF_FW_REQUEST_NVRAM,
+ brcmf_sdio_get_fwname(bus->ci,
+ BRCMF_FIRMWARE_BIN),
+ brcmf_sdio_get_fwname(bus->ci,
+ BRCMF_FIRMWARE_NVRAM),
+ brcmf_sdio_firmware_callback);
if (ret != 0) {
- brcmf_err("dongle is not responding\n");
+ brcmf_err("async firmware request failed: %d\n", ret);
goto fail;
}
@@ -4173,9 +4154,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
- if (bus->sdiodev->bus_if->drvr) {
- brcmf_detach(bus->sdiodev->dev);
- }
+ brcmf_detach(bus->sdiodev->dev);
cancel_work_sync(&bus->datawork);
if (bus->brcmf_wq)
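
[Editor's note] The deleted .init callback was the last synchronous step in bus bring-up; the new firmware callback continues probe once request_firmware_nowait() delivers the images. A kernel-style sketch of that two-stage chain, assuming driver context; all names below except the firmware API are hypothetical:

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>

struct my_fw_ctx {
	struct device *dev;
	const struct firmware *code;
};

static void my_nvram_done(const struct firmware *nv, void *arg)
{
	struct my_fw_ctx *ctx = arg;

	/* both images available here (nv may be NULL if optional) */
	/* ... download ctx->code and nv to the dongle, start the bus ... */
	release_firmware(ctx->code);
	if (nv)
		release_firmware(nv);
	kfree(ctx);
}

static void my_code_done(const struct firmware *fw, void *arg)
{
	struct my_fw_ctx *ctx = arg;

	if (!fw) {		/* async failure: unbind, triggering .remove() */
		device_release_driver(ctx->dev);
		kfree(ctx);
		return;
	}
	ctx->code = fw;
	if (request_firmware_nowait(THIS_MODULE, true, "chip.nvram.txt",
				    ctx->dev, GFP_KERNEL, ctx, my_nvram_done))
		my_nvram_done(NULL, ctx);	/* treat NVRAM as optional */
}

static int my_start_async_load(struct device *dev)
{
	struct my_fw_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->dev = dev;
	return request_firmware_nowait(THIS_MODULE, true, "chip.bin",
				       dev, GFP_KERNEL, ctx, my_code_done);
}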
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
new file mode 100644
index 000000000000..7b7d237c1ddb
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+
+#include "dhd_dbg.h"
+#include "firmware.h"
+
+enum nvram_parser_state {
+ IDLE,
+ KEY,
+ VALUE,
+ COMMENT,
+ END
+};
+
+/**
+ * struct nvram_parser - internal info for parser.
+ *
+ * @state: current parser state.
+ * @fwnv: input buffer being parsed.
+ * @nvram: output buffer with parse result.
+ * @nvram_len: length of parse result.
+ * @line: current line.
+ * @column: current column in line.
+ * @pos: byte offset in input buffer.
+ * @entry: start position of key,value entry.
+ */
+struct nvram_parser {
+ enum nvram_parser_state state;
+ const struct firmware *fwnv;
+ u8 *nvram;
+ u32 nvram_len;
+ u32 line;
+ u32 column;
+ u32 pos;
+ u32 entry;
+};
+
+static bool is_nvram_char(char c)
+{
+ /* comment marker excluded */
+ if (c == '#')
+ return false;
+
+ /* key and value may have any other readable character */
+ return (c > 0x20 && c < 0x7f);
+}
+
+static bool is_whitespace(char c)
+{
+ return (c == ' ' || c == '\r' || c == '\n' || c == '\t');
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
+{
+ char c;
+
+ c = nvp->fwnv->data[nvp->pos];
+ if (c == '\n')
+ return COMMENT;
+ if (is_whitespace(c))
+ goto proceed;
+ if (c == '#')
+ return COMMENT;
+ if (is_nvram_char(c)) {
+ nvp->entry = nvp->pos;
+ return KEY;
+ }
+ brcmf_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n",
+ nvp->line, nvp->column);
+proceed:
+ nvp->column++;
+ nvp->pos++;
+ return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
+{
+ enum nvram_parser_state st = nvp->state;
+ char c;
+
+ c = nvp->fwnv->data[nvp->pos];
+ if (c == '=') {
+ st = VALUE;
+ } else if (!is_nvram_char(c)) {
+ brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
+ nvp->line, nvp->column);
+ return COMMENT;
+ }
+
+ nvp->column++;
+ nvp->pos++;
+ return st;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_value(struct nvram_parser *nvp)
+{
+ char c;
+ char *skv;
+ char *ekv;
+ u32 cplen;
+
+ c = nvp->fwnv->data[nvp->pos];
+ if (!is_nvram_char(c)) {
+ /* key,value pair complete */
+ ekv = (char *)&nvp->fwnv->data[nvp->pos];
+ skv = (char *)&nvp->fwnv->data[nvp->entry];
+ cplen = ekv - skv;
+ /* copy to output buffer */
+ memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
+ nvp->nvram_len += cplen;
+ nvp->nvram[nvp->nvram_len] = '\0';
+ nvp->nvram_len++;
+ return IDLE;
+ }
+ nvp->pos++;
+ nvp->column++;
+ return VALUE;
+}
+
+static enum nvram_parser_state
+brcmf_nvram_handle_comment(struct nvram_parser *nvp)
+{
+ char *eol, *sol;
+
+ sol = (char *)&nvp->fwnv->data[nvp->pos];
+ eol = strchr(sol, '\n');
+ if (eol == NULL)
+ return END;
+
+ /* eat everything up to and including the newline */
+ nvp->line++;
+ nvp->column = 1;
+ nvp->pos += (eol - sol) + 1;
+ return IDLE;
+}
+
+static enum nvram_parser_state brcmf_nvram_handle_end(struct nvram_parser *nvp)
+{
+ /* final state */
+ return END;
+}
+
+static enum nvram_parser_state
+(*nv_parser_states[])(struct nvram_parser *nvp) = {
+ brcmf_nvram_handle_idle,
+ brcmf_nvram_handle_key,
+ brcmf_nvram_handle_value,
+ brcmf_nvram_handle_comment,
+ brcmf_nvram_handle_end
+};
+
+static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
+ const struct firmware *nv)
+{
+ memset(nvp, 0, sizeof(*nvp));
+ nvp->fwnv = nv;
+ /* Alloc for extra 0 byte + roundup by 4 + length field */
+ nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ if (!nvp->nvram)
+ return -ENOMEM;
+
+ nvp->line = 1;
+ nvp->column = 1;
+ return 0;
+}
+
+/* brcmf_fw_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
+ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+ * End of buffer is completed with token identifying length of buffer.
+ */
+static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
+{
+ struct nvram_parser nvp;
+ u32 pad;
+ u32 token;
+ __le32 token_le;
+
+ if (brcmf_init_nvram_parser(&nvp, nv) < 0)
+ return NULL;
+
+ while (nvp.pos < nv->size) {
+ nvp.state = nv_parser_states[nvp.state](&nvp);
+ if (nvp.state == END)
+ break;
+ }
+ pad = nvp.nvram_len;
+ *new_length = roundup(nvp.nvram_len + 1, 4);
+ while (pad != *new_length) {
+ nvp.nvram[pad] = 0;
+ pad++;
+ }
+
+ token = *new_length / 4;
+ token = (~token << 16) | (token & 0x0000FFFF);
+ token_le = cpu_to_le32(token);
+
+ memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le));
+ *new_length += sizeof(token_le);
+
+ return nvp.nvram;
+}
+
+void brcmf_fw_nvram_free(void *nvram)
+{
+ kfree(nvram);
+}
+
+struct brcmf_fw {
+ struct device *dev;
+ u16 flags;
+ const struct firmware *code;
+ const char *nvram_name;
+ void (*done)(struct device *dev, const struct firmware *fw,
+ void *nvram_image, u32 nvram_len);
+};
+
+static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
+{
+ struct brcmf_fw *fwctx = ctx;
+ u32 nvram_length = 0;
+ void *nvram = NULL;
+
+ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
+ if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
+
+ if (fw) {
+ nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
+ release_firmware(fw);
+ if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
+ }
+
+ fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
+ kfree(fwctx);
+ return;
+
+fail:
+ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
+ if (fwctx->code)
+ release_firmware(fwctx->code);
+ device_release_driver(fwctx->dev);
+ kfree(fwctx);
+}
+
+static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx)
+{
+ struct brcmf_fw *fwctx = ctx;
+ int ret;
+
+ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
+ if (!fw)
+ goto fail;
+
+ /* only code was requested, so we are done here */
+ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) {
+ fwctx->done(fwctx->dev, fw, NULL, 0);
+ kfree(fwctx);
+ return;
+ }
+ fwctx->code = fw;
+ ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_nvram_done);
+
+ if (!ret)
+ return;
+
+ /* when nvram is optional, call the .done() callback here */
+ if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
+ fwctx->done(fwctx->dev, fw, NULL, 0);
+ kfree(fwctx);
+ return;
+ }
+
+ /* failed nvram request */
+ release_firmware(fw);
+fail:
+ brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
+ device_release_driver(fwctx->dev);
+ kfree(fwctx);
+}
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len))
+{
+ struct brcmf_fw *fwctx;
+
+ brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
+ if (!fw_cb || !code)
+ return -EINVAL;
+
+ if ((flags & BRCMF_FW_REQUEST_NVRAM) && !nvram)
+ return -EINVAL;
+
+ fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL);
+ if (!fwctx)
+ return -ENOMEM;
+
+ fwctx->dev = dev;
+ fwctx->flags = flags;
+ fwctx->done = fw_cb;
+ if (flags & BRCMF_FW_REQUEST_NVRAM)
+ fwctx->nvram_name = nvram;
+
+ return request_firmware_nowait(THIS_MODULE, true, code, dev,
+ GFP_KERNEL, fwctx,
+ brcmf_fw_request_code_done);
+}
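
[Editor's note] A worked example (not part of the patch) of the end-of-buffer token brcmf_fw_nvram_strip() appends: the padded length expressed in 32-bit words, with its one's complement in the upper halfword as a consistency check for the firmware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nvram_len = 110;			/* bytes after parsing */
	uint32_t padded = (nvram_len + 1 + 3) & ~3u;	/* roundup(len + 1, 4) */
	uint32_t words = padded / 4;
	uint32_t token = (~words << 16) | (words & 0x0000FFFF);

	printf("padded=%u words=%u token=0x%08x\n", padded, words, token);
	/* prints: padded=112 words=28 token=0xffe3001c */
	return 0;
}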
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index d454580928c9..6431bfd7afff 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -13,12 +13,24 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef BRCMFMAC_NVRAM_H
-#define BRCMFMAC_NVRAM_H
+#ifndef BRCMFMAC_FIRMWARE_H
+#define BRCMFMAC_FIRMWARE_H
+#define BRCMF_FW_REQUEST 0x000F
+#define BRCMF_FW_REQUEST_NVRAM 0x0001
+#define BRCMF_FW_REQ_FLAGS 0x00F0
+#define BRCMF_FW_REQ_NV_OPTIONAL 0x0010
-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length);
-void brcmf_nvram_free(void *nvram);
-
+void brcmf_fw_nvram_free(void *nvram);
+/*
+ * Request firmware(s) asynchronously. When the asynchronous request
+ * fails, the callback is not used; device_release_driver() is called
+ * instead, which invokes the driver's .remove() callback.
+ */
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len));
-#endif /* BRCMFMAC_NVRAM_H */
+#endif /* BRCMFMAC_FIRMWARE_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 614e4888504f..2bc68a2137fc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -53,6 +53,14 @@
#define BRCMF_OBSS_COEX_OFF 0
#define BRCMF_OBSS_COEX_ON 1
+/* join preference types for join_pref iovar */
+enum brcmf_join_pref_types {
+ BRCMF_JOIN_PREF_RSSI = 1,
+ BRCMF_JOIN_PREF_WPA,
+ BRCMF_JOIN_PREF_BAND,
+ BRCMF_JOIN_PREF_RSSI_DELTA,
+};
+
enum brcmf_fil_p2p_if_types {
BRCMF_FIL_P2P_IF_CLIENT,
BRCMF_FIL_P2P_IF_GO,
@@ -282,6 +290,22 @@ struct brcmf_assoc_params_le {
__le16 chanspec_list[1];
};
+/**
+ * struct brcmf_join_pref_params - parameters for preferred join selection.
+ *
+ * @type: preference type (see enum brcmf_join_pref_types).
+ * @len: length of bytes following (currently always 2).
+ * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA).
+ * @band: band to which selection preference applies.
+ * This is used if @type is BAND or RSSI_DELTA.
+ */
+struct brcmf_join_pref_params {
+ u8 type;
+ u8 len;
+ u8 rssi_gain;
+ u8 band;
+};
+
/* used for join with or without a specific bssid and channel list */
struct brcmf_join_params {
struct brcmf_ssid_le ssid_le;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index c3e7d76dbf35..699908de314a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -476,6 +476,7 @@ struct brcmf_fws_info {
bool bus_flow_blocked;
bool creditmap_received;
u8 mode;
+ bool avoid_queueing;
};
/*
@@ -1369,13 +1370,12 @@ done:
}
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
- struct sk_buff *skb, u32 genbit,
- u16 seq)
+ struct sk_buff *skb, u8 ifidx,
+ u32 genbit, u16 seq)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u32 hslot;
int ret;
- u8 ifidx;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
@@ -1389,29 +1389,21 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
entry->generation = genbit;
- ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
- if (ret == 0) {
- brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
- brcmf_skbcb(skb)->htod_seq = seq;
- if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
- brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
- } else {
- brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
- }
- ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
- skb);
+ brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
+ brcmf_skbcb(skb)->htod_seq = seq;
+ if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
+ brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
+ } else {
+ brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
}
+ ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
if (ret != 0) {
- /* suppress q is full or hdrpull failed, drop this packet */
- brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
- true);
+ /* suppress q is full, drop this packet */
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
} else {
- /*
- * Mark suppressed to avoid a double free during
- * wlfc cleanup
- */
+ /* Mark suppressed to avoid a double free during wlfc cleanup */
brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
}
@@ -1428,6 +1420,7 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
struct sk_buff *skb;
struct brcmf_skbuff_cb *skcb;
struct brcmf_fws_mac_descriptor *entry = NULL;
+ u8 ifidx;
brcmf_dbg(DATA, "flags %d\n", flags);
@@ -1476,12 +1469,15 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
}
brcmf_fws_macdesc_return_req_credit(skb);
+ if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
+ brcmu_pkt_buf_free_skb(skb);
+ return -EINVAL;
+ }
if (!remove_from_hanger)
- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit,
- seq);
-
+ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
+ genbit, seq);
if (remove_from_hanger || ret)
- brcmf_txfinalize(fws->drvr, skb, true);
+ brcmf_txfinalize(fws->drvr, skb, ifidx, true);
return 0;
}
@@ -1868,7 +1864,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
struct ethhdr *eh = (struct ethhdr *)(skb->data);
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
- bool pae = eh->h_proto == htons(ETH_P_PAE);
+ int rc = 0;
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
@@ -1876,8 +1872,13 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
skb->priority = cfg80211_classify8021d(skb, NULL);
drvr->tx_multicast += !!multicast;
- if (pae)
- atomic_inc(&ifp->pend_8021x_cnt);
+
+ if (fws->avoid_queueing) {
+ rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
+ if (rc < 0)
+ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+ return rc;
+ }
/* set control buffer information */
skcb->if_flags = 0;
@@ -1899,15 +1900,12 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_schedule_deq(fws);
} else {
brcmf_err("drop skb: no hanger slot\n");
- if (pae) {
- atomic_dec(&ifp->pend_8021x_cnt);
- if (waitqueue_active(&ifp->pend_8021x_wait))
- wake_up(&ifp->pend_8021x_wait);
- }
- brcmu_pkt_buf_free_skb(skb);
+ brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+ rc = -ENOMEM;
}
brcmf_fws_unlock(fws);
- return 0;
+
+ return rc;
}
void brcmf_fws_reset_interface(struct brcmf_if *ifp)
@@ -1982,7 +1980,8 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
brcmf_fws_lock(fws);
if (ret < 0)
- brcmf_txfinalize(drvr, skb, false);
+ brcmf_txfinalize(drvr, skb, ifidx,
+ false);
if (fws->bus_flow_blocked)
break;
}
@@ -2039,6 +2038,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
fws->drvr = drvr;
fws->fcmode = fcmode;
+ if ((drvr->bus_if->always_use_fws_queue == false) &&
+ (fcmode == BRCMF_FWS_FCMODE_NONE)) {
+ fws->avoid_queueing = true;
+ brcmf_dbg(INFO, "FWS queueing will be avoided\n");
+ return 0;
+ }
+
fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
if (fws->fws_wq == NULL) {
brcmf_err("workqueue creation failed\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c b/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
deleted file mode 100644
index d5ef86db631b..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/nvram.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2013 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-
-#include "nvram.h"
-
-/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a file
- * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
- * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
- * End of buffer is completed with token identifying length of buffer.
- */
-void *brcmf_nvram_strip(const struct firmware *nv, u32 *new_length)
-{
- u8 *nvram;
- u32 i;
- u32 len;
- u32 column;
- u8 val;
- bool comment;
- u32 token;
- __le32 token_le;
-
- /* Alloc for extra 0 byte + roundup by 4 + length field */
- nvram = kmalloc(nv->size + 1 + 3 + sizeof(token_le), GFP_KERNEL);
- if (!nvram)
- return NULL;
-
- len = 0;
- column = 0;
- comment = false;
- for (i = 0; i < nv->size; i++) {
- val = nv->data[i];
- if (val == 0)
- break;
- if (val == '\r')
- continue;
- if (comment && (val != '\n'))
- continue;
- comment = false;
- if (val == '#') {
- comment = true;
- continue;
- }
- if (val == '\n') {
- if (column == 0)
- continue;
- nvram[len] = 0;
- len++;
- column = 0;
- continue;
- }
- nvram[len] = val;
- len++;
- column++;
- }
- column = len;
- *new_length = roundup(len + 1, 4);
- while (column != *new_length) {
- nvram[column] = 0;
- column++;
- }
-
- token = *new_length / 4;
- token = (~token << 16) | (token & 0x0000FFFF);
- token_le = cpu_to_le32(token);
-
- memcpy(&nvram[*new_length], &token_le, sizeof(token_le));
- *new_length += sizeof(token_le);
-
- return nvram;
-}
-
-void brcmf_nvram_free(void *nvram)
-{
- kfree(nvram);
-}
-
-
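
[Editor's note] The deleted single-pass stripper and the new state machine in firmware.c produce the same output format: NUL-separated key=value pairs with carriage returns, blank lines, and comments removed. A simplified standalone re-implementation of the deleted loop, showing the transformation on sample input:

#include <stdio.h>

int main(void)
{
	const char in[] =
		"boardtype=0x0587\r\n# comment line\n\nmacaddr=00:90:4c:aa:bb:cc\n";
	char out[sizeof(in)];
	unsigned len = 0, col = 0;
	int comment = 0;

	for (unsigned i = 0; in[i]; i++) {
		char c = in[i];

		if (c == '\r')
			continue;
		if (comment && c != '\n')
			continue;
		comment = 0;
		if (c == '#') {
			comment = 1;
			continue;
		}
		if (c == '\n') {
			if (col) {		/* terminate a non-empty pair */
				out[len++] = '\0';
				col = 0;
			}
			continue;
		}
		out[len++] = c;
		col++;
	}
	for (unsigned i = 0; i < len; i++)
		putchar(out[i] ? out[i] : '|');	/* render NULs as '|' */
	putchar('\n');	/* boardtype=0x0587|macaddr=00:90:4c:aa:bb:cc| */
	return 0;
}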
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 24f65cd53859..6db51a666f61 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -25,6 +25,7 @@
#include <dhd_bus.h>
#include <dhd_dbg.h>
+#include "firmware.h"
#include "usb_rdl.h"
#include "usb.h"
@@ -61,12 +62,6 @@ struct brcmf_usb_image {
u8 *image;
int image_len;
};
-static struct list_head fw_image_list;
-
-struct intr_transfer_buf {
- u32 notification;
- u32 reserved;
-};
struct brcmf_usbdev_info {
struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
@@ -75,7 +70,7 @@ struct brcmf_usbdev_info {
struct list_head rx_postq;
struct list_head tx_freeq;
struct list_head tx_postq;
- uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
+ uint rx_pipe, tx_pipe, rx_pipe2;
int rx_low_watermark;
int tx_low_watermark;
@@ -87,7 +82,7 @@ struct brcmf_usbdev_info {
struct brcmf_usbreq *tx_reqs;
struct brcmf_usbreq *rx_reqs;
- u8 *image; /* buffer for combine fw and nvram */
+ const u8 *image; /* buffer for combine fw and nvram */
int image_len;
struct usb_device *usbdev;
@@ -104,10 +99,6 @@ struct brcmf_usbdev_info {
ulong ctl_op;
struct urb *bulk_urb; /* used for FW download */
- struct urb *intr_urb; /* URB for interrupt endpoint */
- int intr_size; /* Size of interrupt message */
- int interval; /* Interrupt polling interval */
- struct intr_transfer_buf intr; /* Data buffer for interrupt endpoint */
};
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -531,39 +522,6 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
}
}
-static void
-brcmf_usb_intr_complete(struct urb *urb)
-{
- struct brcmf_usbdev_info *devinfo =
- (struct brcmf_usbdev_info *)urb->context;
- int err;
-
- brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
-
- if (devinfo == NULL)
- return;
-
- if (unlikely(urb->status)) {
- if (urb->status == -ENOENT ||
- urb->status == -ESHUTDOWN ||
- urb->status == -ENODEV) {
- brcmf_usb_state_change(devinfo,
- BRCMFMAC_USB_STATE_DOWN);
- }
- }
-
- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
- brcmf_err("intr cb when DBUS down, ignoring\n");
- return;
- }
-
- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
- err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
- if (err)
- brcmf_err("usb_submit_urb, err=%d\n", err);
- }
-}
-
static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
@@ -619,7 +577,6 @@ static int brcmf_usb_up(struct device *dev)
{
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
u16 ifnum;
- int ret;
brcmf_dbg(USB, "Enter\n");
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
@@ -628,23 +585,6 @@ static int brcmf_usb_up(struct device *dev)
/* Success, indicate devinfo is fully up */
brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
- if (devinfo->intr_urb) {
- usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
- devinfo->intr_pipe,
- &devinfo->intr,
- devinfo->intr_size,
- (usb_complete_t)brcmf_usb_intr_complete,
- devinfo,
- devinfo->interval);
-
- ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
- if (ret) {
- brcmf_err("USB_SUBMIT_URB failed with status %d\n",
- ret);
- return -EINVAL;
- }
- }
-
if (devinfo->ctl_urb) {
devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
@@ -681,8 +621,6 @@ static void brcmf_usb_down(struct device *dev)
return;
brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
- if (devinfo->intr_urb)
- usb_kill_urb(devinfo->intr_urb);
if (devinfo->ctl_urb)
usb_kill_urb(devinfo->ctl_urb);
@@ -1021,7 +959,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
}
err = brcmf_usb_dlstart(devinfo,
- devinfo->image, devinfo->image_len);
+ (u8 *)devinfo->image, devinfo->image_len);
if (err == 0)
err = brcmf_usb_dlrun(devinfo);
return err;
@@ -1036,7 +974,6 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
brcmf_usb_free_q(&devinfo->rx_freeq, false);
brcmf_usb_free_q(&devinfo->tx_freeq, false);
- usb_free_urb(devinfo->intr_urb);
usb_free_urb(devinfo->ctl_urb);
usb_free_urb(devinfo->bulk_urb);
@@ -1080,68 +1017,20 @@ static int check_file(const u8 *headers)
return -1;
}
-static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
+static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
{
- s8 *fwname;
- const struct firmware *fw;
- struct brcmf_usb_image *fw_image;
- int err;
-
- brcmf_dbg(USB, "Enter\n");
switch (devinfo->bus_pub.devid) {
case 43143:
- fwname = BRCMF_USB_43143_FW_NAME;
- break;
+ return BRCMF_USB_43143_FW_NAME;
case 43235:
case 43236:
case 43238:
- fwname = BRCMF_USB_43236_FW_NAME;
- break;
+ return BRCMF_USB_43236_FW_NAME;
case 43242:
- fwname = BRCMF_USB_43242_FW_NAME;
- break;
+ return BRCMF_USB_43242_FW_NAME;
default:
- return -EINVAL;
- break;
- }
- brcmf_dbg(USB, "Loading FW %s\n", fwname);
- list_for_each_entry(fw_image, &fw_image_list, list) {
- if (fw_image->fwname == fwname) {
- devinfo->image = fw_image->image;
- devinfo->image_len = fw_image->image_len;
- return 0;
- }
- }
- /* fw image not yet loaded. Load it now and add to list */
- err = request_firmware(&fw, fwname, devinfo->dev);
- if (!fw) {
- brcmf_err("fail to request firmware %s\n", fwname);
- return err;
- }
- if (check_file(fw->data) < 0) {
- brcmf_err("invalid firmware %s\n", fwname);
- return -EINVAL;
+ return NULL;
}
-
- fw_image = kzalloc(sizeof(*fw_image), GFP_ATOMIC);
- if (!fw_image)
- return -ENOMEM;
- INIT_LIST_HEAD(&fw_image->list);
- list_add_tail(&fw_image->list, &fw_image_list);
- fw_image->fwname = fwname;
- fw_image->image = vmalloc(fw->size);
- if (!fw_image->image)
- return -ENOMEM;
-
- memcpy(fw_image->image, fw->data, fw->size);
- fw_image->image_len = fw->size;
-
- release_firmware(fw);
-
- devinfo->image = fw_image->image;
- devinfo->image_len = fw_image->image_len;
-
- return 0;
}
@@ -1186,11 +1075,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
goto error;
devinfo->tx_freecount = ntxq;
- devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!devinfo->intr_urb) {
- brcmf_err("usb_alloc_urb (intr) failed\n");
- goto error;
- }
devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!devinfo->ctl_urb) {
brcmf_err("usb_alloc_urb (ctl) failed\n");
@@ -1202,16 +1086,6 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
goto error;
}
- if (!brcmf_usb_dlneeded(devinfo))
- return &devinfo->bus_pub;
-
- brcmf_dbg(USB, "Start fw downloading\n");
- if (brcmf_usb_get_fw(devinfo))
- goto error;
-
- if (brcmf_usb_fw_download(devinfo))
- goto error;
-
return &devinfo->bus_pub;
error:
@@ -1222,18 +1096,77 @@ error:
static struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
- .init = brcmf_usb_up,
.stop = brcmf_usb_down,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
};
+static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
+{
+ int ret;
+
+ /* Attach to the common driver interface */
+ ret = brcmf_attach(devinfo->dev);
+ if (ret) {
+ brcmf_err("brcmf_attach failed\n");
+ return ret;
+ }
+
+ ret = brcmf_usb_up(devinfo->dev);
+ if (ret)
+ goto fail;
+
+ ret = brcmf_bus_start(devinfo->dev);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ brcmf_detach(devinfo->dev);
+ return ret;
+}
+
+static void brcmf_usb_probe_phase2(struct device *dev,
+ const struct firmware *fw,
+ void *nvram, u32 nvlen)
+{
+ struct brcmf_bus *bus = dev_get_drvdata(dev);
+ struct brcmf_usbdev_info *devinfo;
+ int ret;
+
+ brcmf_dbg(USB, "Start fw downloading\n");
+ ret = check_file(fw->data);
+ if (ret < 0) {
+ brcmf_err("invalid firmware\n");
+ release_firmware(fw);
+ goto error;
+ }
+
+ devinfo = bus->bus_priv.usb->devinfo;
+ devinfo->image = fw->data;
+ devinfo->image_len = fw->size;
+
+ ret = brcmf_usb_fw_download(devinfo);
+ release_firmware(fw);
+ if (ret)
+ goto error;
+
+ ret = brcmf_usb_bus_setup(devinfo);
+ if (ret)
+ goto error;
+
+ return;
+error:
+ brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+ device_release_driver(dev);
+}
+
static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
{
struct brcmf_bus *bus = NULL;
struct brcmf_usbdev *bus_pub = NULL;
- int ret;
struct device *dev = devinfo->dev;
+ int ret;
brcmf_dbg(USB, "Enter\n");
bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
@@ -1254,22 +1187,18 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->chip = bus_pub->devid;
bus->chiprev = bus_pub->chiprev;
bus->proto_type = BRCMF_PROTO_BCDC;
+ bus->always_use_fws_queue = true;
- /* Attach to the common driver interface */
- ret = brcmf_attach(dev);
- if (ret) {
- brcmf_err("brcmf_attach failed\n");
- goto fail;
- }
-
- ret = brcmf_bus_start(dev);
- if (ret) {
- brcmf_err("dongle is not responding\n");
- brcmf_detach(dev);
- goto fail;
+ if (!brcmf_usb_dlneeded(devinfo)) {
+ ret = brcmf_usb_bus_setup(devinfo);
+ if (ret)
+ goto fail;
}
-
+ /* request firmware here */
+ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
+ brcmf_usb_probe_phase2);
return 0;
+
fail:
/* Release resources in reverse order */
kfree(bus);
@@ -1357,9 +1286,6 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail;
}
- endpoint_num = endpoint->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
- devinfo->intr_pipe = usb_rcvintpipe(usb, endpoint_num);
-
devinfo->rx_pipe = 0;
devinfo->rx_pipe2 = 0;
devinfo->tx_pipe = 0;
@@ -1391,16 +1317,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
}
- /* Allocate interrupt URB and data buffer */
- /* RNDIS says 8-byte intr, our old drivers used 4-byte */
- if (IFEPDESC(usb, CONTROL_IF, 0).wMaxPacketSize == cpu_to_le16(16))
- devinfo->intr_size = 8;
- else
- devinfo->intr_size = 4;
-
- devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
-
- if (usb->speed == USB_SPEED_HIGH)
+ if (usb->speed == USB_SPEED_SUPER)
+ brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
+ else if (usb->speed == USB_SPEED_HIGH)
brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
else
brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
@@ -1455,23 +1374,18 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- if (!brcmf_attach(devinfo->dev))
- return brcmf_bus_start(&usb->dev);
-
- return 0;
+ return brcmf_usb_bus_setup(devinfo);
}
static int brcmf_usb_reset_resume(struct usb_interface *intf)
{
struct usb_device *usb = interface_to_usbdev(intf);
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
-
brcmf_dbg(USB, "Enter\n");
- if (!brcmf_usb_fw_download(devinfo))
- return brcmf_usb_resume(intf);
-
- return -EIO;
+ return brcmf_fw_get_firmwares(&usb->dev, 0,
+ brcmf_usb_get_fwname(devinfo), NULL,
+ brcmf_usb_probe_phase2);
}
#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
@@ -1506,16 +1420,6 @@ static struct usb_driver brcmf_usbdrvr = {
.disable_hub_initiated_lpm = 1,
};
-static void brcmf_release_fw(struct list_head *q)
-{
- struct brcmf_usb_image *fw_image, *next;
-
- list_for_each_entry_safe(fw_image, next, q, list) {
- vfree(fw_image->image);
- list_del_init(&fw_image->list);
- }
-}
-
static int brcmf_usb_reset_device(struct device *dev, void *notused)
{
/* device passed is the usb interface so we
@@ -1534,12 +1438,10 @@ void brcmf_usb_exit(void)
ret = driver_for_each_device(drv, NULL, NULL,
brcmf_usb_reset_device);
usb_deregister(&brcmf_usbdrvr);
- brcmf_release_fw(&fw_image_list);
}
void brcmf_usb_register(void)
{
brcmf_dbg(USB, "Enter\n");
- INIT_LIST_HEAD(&fw_image_list);
usb_register(&brcmf_usbdrvr);
}
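
[Editor's note] brcmf_usb_bus_setup() above collects the attach/up/start sequence behind a single unwind point: any failure after brcmf_attach() detaches again before returning. A standalone model of that idiom with illustrative stage functions:

#include <stdio.h>

static int attach(void)  { puts("attach"); return 0; }
static void detach(void) { puts("detach"); }
static int up(void)      { puts("up");     return -1; }	/* simulate failure */
static int start(void)   { puts("start");  return 0; }

static int bus_setup(void)
{
	int ret = attach();

	if (ret)
		return ret;
	ret = up();
	if (!ret)
		ret = start();
	if (ret)
		detach();	/* unwind the only committed stage */
	return ret;
}

int main(void)
{
	return bus_setup() ? 1 : 0;
}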
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index be1985296bdc..d8fa276e368b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -221,9 +221,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
*/
REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
/* IEEE 802.11a, channel 36..64 */
- REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+ REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
/* IEEE 802.11a, channel 100..165 */
- REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+ REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
};
static const u32 __wl_cipher_suites[] = {
@@ -341,6 +341,60 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
return qdbm;
}
+static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
+ struct cfg80211_chan_def *ch)
+{
+ struct brcmu_chan ch_inf;
+ s32 primary_offset;
+
+ brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
+ ch->chan->center_freq, ch->center_freq1, ch->width);
+ ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
+ primary_offset = ch->center_freq1 - ch->chan->center_freq;
+ switch (ch->width) {
+ case NL80211_CHAN_WIDTH_20:
+ ch_inf.bw = BRCMU_CHAN_BW_20;
+ WARN_ON(primary_offset != 0);
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ ch_inf.bw = BRCMU_CHAN_BW_40;
+ if (primary_offset < 0)
+ ch_inf.sb = BRCMU_CHAN_SB_U;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_L;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ ch_inf.bw = BRCMU_CHAN_BW_80;
+ if (primary_offset < 0) {
+ if (primary_offset < -CH_10MHZ_APART)
+ ch_inf.sb = BRCMU_CHAN_SB_UU;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_UL;
+ } else {
+ if (primary_offset > CH_10MHZ_APART)
+ ch_inf.sb = BRCMU_CHAN_SB_LL;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_LU;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ switch (ch->chan->band) {
+ case IEEE80211_BAND_2GHZ:
+ ch_inf.band = BRCMU_CHAN_BAND_2G;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ ch_inf.band = BRCMU_CHAN_BAND_5G;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ d11inf->encchspec(&ch_inf);
+
+ return ch_inf.chspec;
+}
+
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch)
{
@@ -586,6 +640,9 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
if (err)
brcmf_err("Scan abort failed\n");
}
+
+ brcmf_set_mpc(ifp, 1);
+
/*
* e-scan can be initiated by scheduled scan
* which takes precedence.
@@ -595,12 +652,10 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
cfg->sched_escan = false;
if (!aborted)
cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
- brcmf_set_mpc(ifp, 1);
} else if (scan_request) {
brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
aborted ? "Aborted" : "Done");
cfg80211_scan_done(scan_request, aborted);
- brcmf_set_mpc(ifp, 1);
}
if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@ -1236,8 +1291,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
params->chandef.chan->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- chanspec = channel_to_chanspec(&cfg->d11inf,
- params->chandef.chan);
+ chanspec = chandef_to_chanspec(&cfg->d11inf,
+ &params->chandef);
join_params.params_le.chanspec_list[0] =
cpu_to_le16(chanspec);
join_params.params_le.chanspec_num = cpu_to_le32(1);
@@ -2182,7 +2237,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
static s32
brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
@@ -3124,7 +3179,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
}
if (!request->n_ssids || !request->n_match_sets) {
- brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
+ brcmf_dbg(SCAN, "Invalid sched scan req!! n_ssids:%d\n",
request->n_ssids);
return -EINVAL;
}
@@ -3734,23 +3789,6 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif,
}
static s32
-brcmf_cfg80211_set_channel(struct brcmf_cfg80211_info *cfg,
- struct brcmf_if *ifp,
- struct ieee80211_channel *channel)
-{
- u16 chanspec;
- s32 err;
-
- brcmf_dbg(TRACE, "band=%d, center_freq=%d\n", channel->band,
- channel->center_freq);
-
- chanspec = channel_to_chanspec(&cfg->d11inf, channel);
- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
-
- return err;
-}
-
-static s32
brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_ap_settings *settings)
{
@@ -3765,11 +3803,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
+ u16 chanspec;
- brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
- cfg80211_get_chandef_type(&settings->chandef),
- settings->beacon_interval,
- settings->dtim_period);
+ brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
+ settings->chandef.chan->hw_value,
+ settings->chandef.center_freq1, settings->chandef.width,
+ settings->beacon_interval, settings->dtim_period);
brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
settings->ssid, settings->ssid_len, settings->auth_type,
settings->inactivity_timeout);
@@ -3826,9 +3865,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
- err = brcmf_cfg80211_set_channel(cfg, ifp, settings->chandef.chan);
+ chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
if (err < 0) {
- brcmf_err("Set Channel failed, %d\n", err);
+ brcmf_err("Set Channel failed: chspec=%d, %d\n", chanspec, err);
goto exit;
}
@@ -3975,7 +4015,7 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
static int
brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
- u8 *mac)
+ const u8 *mac)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_scb_val_le scbval;
@@ -4203,7 +4243,7 @@ static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
}
static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
- struct net_device *ndev, u8 *peer,
+ struct net_device *ndev, const u8 *peer,
enum nl80211_tdls_operation oper)
{
struct brcmf_if *ifp;
@@ -4364,6 +4404,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_SUPPORTS_TDLS;
+ if (!brcmf_roamoff)
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
@@ -4685,7 +4727,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
struct ieee80211_channel *chan;
s32 err = 0;
- u16 reason;
if (brcmf_is_apmode(ifp->vif)) {
err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
@@ -4706,16 +4747,6 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
brcmf_dbg(CONN, "Linkdown\n");
if (!brcmf_is_ibssmode(ifp->vif)) {
brcmf_bss_connect_done(cfg, ndev, e, false);
- if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
- &ifp->vif->sme_state)) {
- reason = 0;
- if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
- (e->event_code == BRCMF_E_DISASSOC_IND)) &&
- (e->reason != WLAN_REASON_UNSPECIFIED))
- reason = e->reason;
- cfg80211_disconnected(ndev, reason, NULL, 0,
- GFP_KERNEL);
- }
}
brcmf_link_down(ifp->vif);
brcmf_init_prof(ndev_to_prof(ndev));
@@ -5215,6 +5246,9 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
if (!(bw_cap[band] & WLC_BW_40MHZ_BIT) &&
ch.bw == BRCMU_CHAN_BW_40)
continue;
+ if (!(bw_cap[band] & WLC_BW_80MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_80)
+ continue;
update = false;
for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
if (band_chan_arr[j].hw_value == ch.chnum) {
@@ -5231,10 +5265,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- if (ch.bw == BRCMU_CHAN_BW_40) {
- /* assuming the order is HT20, HT40 Upper,
- * HT40 lower from chanspecs
- */
+ /* assuming the chanspecs order is HT20,
+ * HT40 upper, HT40 lower, and VHT80.
+ */
+ if (ch.bw == BRCMU_CHAN_BW_80) {
+ band_chan_arr[index].flags &=
+ ~IEEE80211_CHAN_NO_80MHZ;
+ } else if (ch.bw == BRCMU_CHAN_BW_40) {
ht40_flag = band_chan_arr[index].flags &
IEEE80211_CHAN_NO_HT40;
if (ch.sb == BRCMU_CHAN_SB_U) {
@@ -5255,8 +5292,13 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
IEEE80211_CHAN_NO_HT40MINUS;
}
} else {
+ /* disable other bandwidths for now as the mentioned
+ * order assures they are enabled for subsequent
+ * chanspecs.
+ */
band_chan_arr[index].flags =
- IEEE80211_CHAN_NO_HT40;
+ IEEE80211_CHAN_NO_HT40 |
+ IEEE80211_CHAN_NO_80MHZ;
ch.bw = BRCMU_CHAN_BW_20;
cfg->d11inf.encchspec(&ch);
channel = ch.chspec;
@@ -5323,13 +5365,63 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
}
}
+static void brcmf_update_ht_cap(struct ieee80211_supported_band *band,
+ u32 bw_cap[2], u32 nchain)
+{
+ band->ht_cap.ht_supported = true;
+ if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static __le16 brcmf_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp)
+{
+ u16 mcs_map;
+ int i;
+
+ for (i = 0, mcs_map = 0xFFFF; i < nchain; i++)
+ mcs_map = (mcs_map << 2) | supp;
+
+ return cpu_to_le16(mcs_map);
+}
+
+static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
+ u32 bw_cap[2], u32 nchain)
+{
+ __le16 mcs_map;
+
+ /* not allowed in 2.4G band */
+ if (band->band == IEEE80211_BAND_2GHZ)
+ return;
+
+ band->vht_cap.vht_supported = true;
+ /* 80MHz is mandatory */
+ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
+ if (bw_cap[band->band] & WLC_BW_160MHZ_BIT) {
+ band->vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
+ }
+ /* all support 256-QAM */
+ mcs_map = brcmf_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9);
+ band->vht_cap.vht_mcs.rx_mcs_map = mcs_map;
+ band->vht_cap.vht_mcs.tx_mcs_map = mcs_map;
+}
+
static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct wiphy *wiphy;
s32 phy_list;
u32 band_list[3];
- u32 nmode;
+ u32 nmode = 0;
+ u32 vhtmode = 0;
u32 bw_cap[2] = { 0, 0 };
u32 rxchain;
u32 nchain;
@@ -5360,14 +5452,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
brcmf_dbg(INFO, "BRCMF_C_GET_BANDLIST reported: 0x%08x 0x%08x 0x%08x phy\n",
band_list[0], band_list[1], band_list[2]);
+ (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode);
err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode);
if (err) {
brcmf_err("nmode error (%d)\n", err);
} else {
brcmf_get_bwcap(ifp, bw_cap);
}
- brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
- bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
+ brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
+ nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
+ bw_cap[IEEE80211_BAND_5GHZ]);
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
@@ -5398,17 +5492,10 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
else
continue;
- if (bw_cap[band->band] & WLC_BW_40MHZ_BIT) {
- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
- band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- }
- band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
- band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
- band->ht_cap.ht_supported = true;
- band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
- memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
- band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (nmode)
+ brcmf_update_ht_cap(band, bw_cap, nchain);
+ if (vhtmode)
+ brcmf_update_vht_cap(band, bw_cap, nchain);
bands[band->band] = band;
}
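
[Editor's note] A worked example (not from the patch) of the VHT MCS map brcmf_get_mcs_map() builds: two bits per spatial stream, streams beyond nchain left at 3 (not supported), each supported stream shifted in at the given support level (IEEE80211_VHT_MCS_SUPPORT_0_9 is 2 in ieee80211.h):

#include <stdio.h>
#include <stdint.h>

static uint16_t mcs_map(unsigned nchain, uint16_t supp)
{
	uint16_t map = 0xFFFF;	/* all eight streams "not supported" */
	unsigned i;

	for (i = 0; i < nchain; i++)
		map = (map << 2) | supp;	/* shift in one stream */
	return map;
}

int main(void)
{
	printf("1x1: 0x%04x\n", mcs_map(1, 2));	/* 0xfffe */
	printf("2x2: 0x%04x\n", mcs_map(2, 2));	/* 0xfffa */
	printf("3x3: 0x%04x\n", mcs_map(3, 2));	/* 0xffea */
	return 0;
}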
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 8c5fa4e58139..43c71bfaa474 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
return result;
}
-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct brcms_info *wl = hw->priv;
int ret;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9417cb5a2553..af8ba64ace39 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4870,14 +4870,11 @@ static void brcms_c_detach_module(struct brcms_c_info *wlc)
/*
* low level detach
*/
-static int brcms_b_detach(struct brcms_c_info *wlc)
+static void brcms_b_detach(struct brcms_c_info *wlc)
{
uint i;
struct brcms_hw_band *band;
struct brcms_hardware *wlc_hw = wlc->hw;
- int callbacks;
-
- callbacks = 0;
brcms_b_detach_dmapio(wlc_hw);
@@ -4900,9 +4897,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
ai_detach(wlc_hw->sih);
wlc_hw->sih = NULL;
}
-
- return callbacks;
-
}
/*
@@ -4917,14 +4911,15 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
*/
uint brcms_c_detach(struct brcms_c_info *wlc)
{
- uint callbacks = 0;
+ uint callbacks;
if (wlc == NULL)
return 0;
- callbacks += brcms_b_detach(wlc);
+ brcms_b_detach(wlc);
/* delete software timers */
+ callbacks = 0;
if (!brcms_c_radio_monitor_stop(wlc))
callbacks++;
diff --git a/drivers/net/wireless/brcm80211/brcmutil/d11.c b/drivers/net/wireless/brcm80211/brcmutil/d11.c
index 30e54e2c6c9b..2b2522bdd8eb 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/d11.c
@@ -21,19 +21,46 @@
#include <brcmu_wifi.h>
#include <brcmu_d11.h>
-static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
+static u16 d11n_sb(enum brcmu_chan_sb sb)
{
- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
+ switch (sb) {
+ case BRCMU_CHAN_SB_NONE:
+ return BRCMU_CHSPEC_D11N_SB_N;
+ case BRCMU_CHAN_SB_L:
+ return BRCMU_CHSPEC_D11N_SB_L;
+ case BRCMU_CHAN_SB_U:
+ return BRCMU_CHSPEC_D11N_SB_U;
+ default:
+ WARN_ON(1);
+ }
+ return 0;
+}
- switch (ch->bw) {
+static u16 d11n_bw(enum brcmu_chan_bw bw)
+{
+ switch (bw) {
case BRCMU_CHAN_BW_20:
- ch->chspec |= BRCMU_CHSPEC_D11N_BW_20 | BRCMU_CHSPEC_D11N_SB_N;
- break;
+ return BRCMU_CHSPEC_D11N_BW_20;
case BRCMU_CHAN_BW_40:
+ return BRCMU_CHSPEC_D11N_BW_40;
default:
- WARN_ON_ONCE(1);
- break;
+ WARN_ON(1);
}
+ return 0;
+}
+
+static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
+{
+ if (ch->bw == BRCMU_CHAN_BW_20)
+ ch->sb = BRCMU_CHAN_SB_NONE;
+
+ ch->chspec = 0;
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
+ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK,
+ 0, d11n_sb(ch->sb));
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK,
+ 0, d11n_bw(ch->bw));
if (ch->chnum <= CH_MAX_2G_CHANNEL)
ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G;
@@ -41,23 +68,34 @@ static void brcmu_d11n_encchspec(struct brcmu_chan *ch)
ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G;
}
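
Note: the rework above leans on brcmu_maskset16(); for reference, that helper (declared in brcmu_utils.h - reproduced here as a sketch, so treat the exact body as an assumption) is a plain read-modify-write of a 16-bit field:

static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift, u16 value)
{
	value = (value << shift) & mask;	/* position and bound the value */
	*var = (*var & ~mask) | value;		/* clear the field, then set it */
}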
-static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
+static u16 d11ac_bw(enum brcmu_chan_bw bw)
{
- ch->chspec = ch->chnum & BRCMU_CHSPEC_CH_MASK;
-
- switch (ch->bw) {
+ switch (bw) {
case BRCMU_CHAN_BW_20:
- ch->chspec |= BRCMU_CHSPEC_D11AC_BW_20;
- break;
+ return BRCMU_CHSPEC_D11AC_BW_20;
case BRCMU_CHAN_BW_40:
+ return BRCMU_CHSPEC_D11AC_BW_40;
case BRCMU_CHAN_BW_80:
- case BRCMU_CHAN_BW_80P80:
- case BRCMU_CHAN_BW_160:
+ return BRCMU_CHSPEC_D11AC_BW_80;
default:
- WARN_ON_ONCE(1);
- break;
+ WARN_ON(1);
}
+ return 0;
+}
+static void brcmu_d11ac_encchspec(struct brcmu_chan *ch)
+{
+ if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE)
+ ch->sb = BRCMU_CHAN_SB_L;
+
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK,
+ BRCMU_CHSPEC_CH_SHIFT, ch->chnum);
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+ BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb);
+ brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK,
+ 0, d11ac_bw(ch->bw));
+
+ ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK;
if (ch->chnum <= CH_MAX_2G_CHANNEL)
ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G;
else
@@ -73,6 +111,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
case BRCMU_CHSPEC_D11N_BW_20:
ch->bw = BRCMU_CHAN_BW_20;
+ ch->sb = BRCMU_CHAN_SB_NONE;
break;
case BRCMU_CHSPEC_D11N_BW_40:
ch->bw = BRCMU_CHAN_BW_40;
@@ -112,6 +151,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
case BRCMU_CHSPEC_D11AC_BW_20:
ch->bw = BRCMU_CHAN_BW_20;
+ ch->sb = BRCMU_CHAN_SB_NONE;
break;
case BRCMU_CHSPEC_D11AC_BW_40:
ch->bw = BRCMU_CHAN_BW_40;
@@ -128,6 +168,25 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
break;
case BRCMU_CHSPEC_D11AC_BW_80:
ch->bw = BRCMU_CHAN_BW_80;
+ ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+ BRCMU_CHSPEC_D11AC_SB_SHIFT);
+ switch (ch->sb) {
+ case BRCMU_CHAN_SB_LL:
+ ch->chnum -= CH_30MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_LU:
+ ch->chnum -= CH_10MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_UL:
+ ch->chnum += CH_10MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_UU:
+ ch->chnum += CH_30MHZ_APART;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
break;
case BRCMU_CHSPEC_D11AC_BW_8080:
case BRCMU_CHSPEC_D11AC_BW_160:
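
Note: the new CH_30MHZ_APART/CH_10MHZ_APART offsets convert the 80 MHz center channel carried in the chanspec into the primary 20 MHz channel: channel numbers sit 5 MHz apart, so the four 20 MHz subchannels of an 80 MHz block lie -30, -10, +10 and +30 MHz from center. Restating the decode arithmetic above (the helper name is made up for illustration):

static u8 primary20_from_center80(u8 center, enum brcmu_chan_sb sb)
{
	switch (sb) {
	case BRCMU_CHAN_SB_LL:
		return center - CH_30MHZ_APART;	/* center 42 -> primary 36 */
	case BRCMU_CHAN_SB_LU:
		return center - CH_10MHZ_APART;	/* center 42 -> primary 40 */
	case BRCMU_CHAN_SB_UL:
		return center + CH_10MHZ_APART;	/* center 42 -> primary 44 */
	case BRCMU_CHAN_SB_UU:
		return center + CH_30MHZ_APART;	/* center 42 -> primary 48 */
	default:
		return center;
	}
}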
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
index 8660a2cba098..f9745ea8b3e0 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_d11.h
@@ -108,13 +108,7 @@ enum brcmu_chan_bw {
};
enum brcmu_chan_sb {
- BRCMU_CHAN_SB_NONE = 0,
- BRCMU_CHAN_SB_L,
- BRCMU_CHAN_SB_U,
- BRCMU_CHAN_SB_LL,
- BRCMU_CHAN_SB_LU,
- BRCMU_CHAN_SB_UL,
- BRCMU_CHAN_SB_UU,
+ BRCMU_CHAN_SB_NONE = -1,
BRCMU_CHAN_SB_LLL,
BRCMU_CHAN_SB_LLU,
BRCMU_CHAN_SB_LUL,
@@ -123,6 +117,12 @@ enum brcmu_chan_sb {
BRCMU_CHAN_SB_ULU,
BRCMU_CHAN_SB_UUL,
BRCMU_CHAN_SB_UUU,
+ BRCMU_CHAN_SB_L = BRCMU_CHAN_SB_LLL,
+ BRCMU_CHAN_SB_U = BRCMU_CHAN_SB_LLU,
+ BRCMU_CHAN_SB_LL = BRCMU_CHAN_SB_LLL,
+ BRCMU_CHAN_SB_LU = BRCMU_CHAN_SB_LLU,
+ BRCMU_CHAN_SB_UL = BRCMU_CHAN_SB_LUL,
+ BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
};
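
Note: with BRCMU_CHAN_SB_NONE moved to -1 and the 40/80 MHz names turned into aliases of the 80+80 (three-letter) encodings, callers keep using the short names unchanged. A hypothetical encode call using the aliases (assuming the encchspec hook from struct brcmu_d11inf in this header):

struct brcmu_chan ch = {
	.chnum = 42,			/* 80 MHz center channel */
	.bw = BRCMU_CHAN_BW_80,
	.sb = BRCMU_CHAN_SB_LL,		/* alias of BRCMU_CHAN_SB_LLL */
};

d11inf->encchspec(&ch);			/* fills ch.chspec */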
struct brcmu_chan {
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
index 74419d4bd123..76b5d3a86294 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h
@@ -29,6 +29,7 @@
#define CH_UPPER_SB 0x01
#define CH_LOWER_SB 0x02
#define CH_EWA_VALID 0x04
+#define CH_30MHZ_APART 6
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 103f7bce8932..cd0cad7f7759 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
return ret;
}
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct cw1200_common *priv = hw->priv;
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index 35babb62cc6a..b7e386b7662b 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 67db34e56d7e..52919ad42726 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -882,7 +882,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
dev->mtu = local->mtu;
- SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
+ dev->ethtool_ops = &prism2_ethtool_ops;
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index d37a6fd90d40..b598e2803500 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.flag |= RX_FLAG_SHORTPRE;
if ((unlikely(rx_stats->phy_count > 20))) {
- D_DROP("dsp size out of range [0,20]: %d/n",
+ D_DROP("dsp size out of range [0,20]: %d\n",
rx_stats->phy_count);
return;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 888ad5c74639..c159c05db6ef 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- D_DROP("dsp size out of range [0,20]: %d/n",
+ D_DROP("dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return;
}
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 4f42174d9994..ecc674627e6e 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4755,7 +4755,8 @@ out:
}
EXPORT_SYMBOL(il_mac_change_interface);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct il_priv *il = hw->priv;
unsigned long timeout = jiffies + msecs_to_jiffies(500);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index dfb13c70efe8..ea5c0f863c4e 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
int il_alloc_txq_mem(struct il_priv *il);
void il_free_txq_mem(struct il_priv *il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 74b3b4de7bb7..7fd50428b934 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -2,10 +2,6 @@ config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
depends on PCI && MAC80211 && HAS_IOMEM
select FW_LOADER
- select NEW_LEDS
- select LEDS_CLASS
- select LEDS_TRIGGERS
- select MAC80211_LEDS
---help---
Select to build the driver supporting the:
@@ -43,6 +39,14 @@ config IWLWIFI
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwlwifi.
+config IWLWIFI_LEDS
+ bool
+ depends on IWLWIFI
+ depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
+ default y
+
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
depends on IWLWIFI
@@ -124,7 +128,6 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
-
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
index dce7ab2e0c4b..4d19685f31c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -4,9 +4,10 @@ iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
iwldvm-objs += power.o
-iwldvm-objs += scan.o led.o
+iwldvm-objs += scan.o
iwldvm-objs += rxon.o devices.o
+iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index be1086c87157..20e6aa910700 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -94,7 +94,6 @@ int iwl_send_calib_results(struct iwl_priv *priv)
{
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
- .flags = CMD_SYNC,
};
struct iwl_calib_result *res;
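
Note: the CMD_SYNC deletions in this and the following hunks all follow one rule: CMD_SYNC was defined as 0, so a zero flags field already means a synchronous command and spelling it out adds nothing. Only asynchronous commands still need a flag; a minimal sketch of the resulting convention:

struct iwl_host_cmd sync_cmd = {
	.id = REPLY_PHY_CALIBRATION_CMD,	/* .flags omitted: synchronous */
};

struct iwl_host_cmd async_cmd = {
	.id = REPLY_PHY_CALIBRATION_CMD,
	.flags = CMD_ASYNC,			/* the only case needing a flag */
};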
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d2fe2596d54e..0ffb6ff1a255 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -1481,7 +1481,7 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
/* make request to uCode to retrieve statistics information */
mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
+ ret = iwl_send_statistics_request(priv, 0, false);
mutex_unlock(&priv->mutex);
if (ret)
@@ -1868,7 +1868,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
/* make request to uCode to retrieve statistics information */
mutex_lock(&priv->mutex);
- iwl_send_statistics_request(priv, CMD_SYNC, true);
+ iwl_send_statistics_request(priv, 0, true);
mutex_unlock(&priv->mutex);
return count;
@@ -2188,7 +2188,6 @@ static int iwl_cmd_echo_test(struct iwl_priv *priv)
struct iwl_host_cmd cmd = {
.id = REPLY_ECHO,
.len = { 0 },
- .flags = CMD_SYNC,
};
ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -2320,7 +2319,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
mutex_lock(&priv->mutex);
/* take the return value to make compiler happy - it will fail anyway */
- ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL);
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3441f70d0ff9..a6f22c32a279 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -888,9 +888,11 @@ struct iwl_priv {
struct iwl_event_log event_log;
+#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
+#endif
/* WoWLAN GTK rekey data */
u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 758c54eeb206..34b41e5f7cfc 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -417,7 +417,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = { sizeof(cmd), },
- .flags = CMD_SYNC,
.data = { &cmd, },
};
@@ -579,7 +578,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = { sizeof(*cmd), },
- .flags = CMD_SYNC,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
int err;
diff --git a/drivers/net/wireless/iwlwifi/dvm/led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index 6a0817d9c4fa..1c6b2252d0f2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
@@ -36,8 +36,20 @@ struct iwl_priv;
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
+#ifdef CONFIG_IWLWIFI_LEDS
void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);
+#else
+static inline void iwlagn_led_enable(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_init(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_exit(struct iwl_priv *priv)
+{
+}
+#endif
#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 576f7ee38ca5..2191621d69c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -81,7 +81,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
sizeof(tx_power_cmd), &tx_power_cmd);
}
@@ -141,7 +141,6 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
struct iwl_host_cmd cmd = {
.id = REPLY_TXFIFO_FLUSH,
.len = { sizeof(struct iwl_txfifo_flush_cmd), },
- .flags = CMD_SYNC,
.data = { &flush_cmd, },
};
@@ -180,7 +179,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
goto done;
}
IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans);
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
@@ -333,12 +332,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
memcpy(&bt_cmd_v2.basic, &basic,
sizeof(basic));
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
+ 0, sizeof(bt_cmd_v2), &bt_cmd_v2);
} else {
memcpy(&bt_cmd_v1.basic, &basic,
sizeof(basic));
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
+ 0, sizeof(bt_cmd_v1), &bt_cmd_v1);
}
if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -1044,7 +1043,6 @@ int iwlagn_send_patterns(struct iwl_priv *priv,
struct iwl_host_cmd cmd = {
.id = REPLY_WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
};
int i, err;
@@ -1201,7 +1199,6 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
if (key_data.use_rsc_tsc) {
struct iwl_host_cmd rsc_tsc_cmd = {
.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
- .flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.len[0] = sizeof(*key_data.rsc_tsc),
@@ -1215,7 +1212,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
if (key_data.use_tkip) {
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_TKIP_PARAMS,
- CMD_SYNC, sizeof(tkip_cmd),
+ 0, sizeof(tkip_cmd),
&tkip_cmd);
if (ret)
goto out;
@@ -1231,20 +1228,20 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC, sizeof(kek_kck_cmd),
+ 0, sizeof(kek_kck_cmd),
&kek_kck_cmd);
if (ret)
goto out;
}
}
- ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
if (ret)
goto out;
ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
- CMD_SYNC, sizeof(wakeup_filter_cmd),
+ 0, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
goto out;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index dd55c9cf7ba8..29af7b51e370 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
}
}
IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(priv->trans);
+ iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 6a6df71af1d7..0b7f46f0b079 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -128,7 +128,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
struct iwl_tx_beacon_cmd *tx_beacon_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_TX_BEACON,
- .flags = CMD_SYNC,
};
struct ieee80211_tx_info *info;
u32 frame_size;
@@ -311,8 +310,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
- return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
- CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
@@ -622,7 +620,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
- CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
+ 0, sizeof(adv_cmd), &adv_cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
@@ -637,7 +635,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_CT_KILL_CONFIG_CMD,
- CMD_SYNC, sizeof(cmd), &cmd);
+ 0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
@@ -673,9 +671,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_dvm_send_cmd_pdu(priv,
- TX_ANT_CONFIGURATION_CMD,
- CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
sizeof(struct iwl_tx_ant_config_cmd),
&tx_ant_cmd);
} else {
@@ -703,7 +699,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
+ 0, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -987,7 +983,7 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
else
IWL_ERR(priv,
- "Cannot request restart before registrating with mac80211");
+ "Cannot request restart before registering with mac80211\n");
} else {
WARN_ON(1);
}
@@ -1127,7 +1123,6 @@ static void iwl_option_config(struct iwl_priv *priv)
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_nvm_data *data = priv->nvm_data;
- char *debug_msg;
if (data->sku_cap_11n_enable &&
!priv->cfg->ht_params) {
@@ -1141,8 +1136,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return -EINVAL;
}
- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
- IWL_DEBUG_INFO(priv, debug_msg,
+ IWL_DEBUG_INFO(priv,
+ "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
data->sku_cap_11n_enable ? "" : "NOT", "enabled");
@@ -1350,7 +1345,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
iwl_set_hw_params(priv);
if (!(priv->nvm_data->sku_cap_ipan_enable)) {
- IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
+ IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
* if not PAN, then don't support P2P -- might be a uCode
@@ -2019,10 +2014,10 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
if (!test_bit(mq, &priv->transport_queue_stop)) {
- IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
+ IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
ieee80211_wake_queue(priv->hw, mq);
} else {
- IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
+ IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
}
}
@@ -2053,6 +2048,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
return false;
}
+static void iwl_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+ ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
+}
+
static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2071,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
.cmd_queue_full = iwl_cmd_queue_full,
.nic_config = iwl_nic_config,
.wimax_active = iwl_wimax_active,
+ .napi_add = iwl_napi_add,
};
/*****************************************************************************
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index b4e61417013a..f2c1439566b5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -278,7 +278,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
+ return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -361,7 +361,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
} else
- IWL_ERR(priv, "set power fail, ret = %d", ret);
+ IWL_ERR(priv, "set power fail, ret = %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index aa773a2da4ab..32b78a66536d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1453,7 +1453,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -1628,7 +1628,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -1799,7 +1799,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -1969,7 +1969,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
default:
- IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
+ IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
@@ -2709,7 +2709,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+ iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
}
static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index cd8377346aff..debec963c610 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -786,7 +786,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_ni(priv->hw, skb);
+ ieee80211_rx(priv->hw, skb);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 503a81e58185..ed50de6362ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -104,7 +104,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
- CMD_SYNC, sizeof(*send), send);
+ 0, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -134,7 +134,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
- CMD_SYNC, sizeof(*send), send);
+ 0, sizeof(*send), send);
send->filter_flags = old_filter;
send->dev_type = old_dev_type;
@@ -160,7 +160,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
sizeof(*send), send);
send->filter_flags = old_filter;
@@ -189,7 +189,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -353,7 +353,7 @@ static int iwl_send_rxon_timing(struct iwl_priv *priv,
le16_to_cpu(ctx->timing.atim_window));
return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
+ 0, sizeof(ctx->timing), &ctx->timing);
}
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
@@ -495,7 +495,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -610,7 +610,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
- ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -823,7 +823,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
- IWL_WARN(priv, "CCK and auto detect");
+ IWL_WARN(priv, "CCK and auto detect\n");
errors |= BIT(8);
}
@@ -1395,7 +1395,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
priv->phy_calib_chain_noise_reset_cmd);
ret = iwl_dvm_send_cmd_pdu(priv,
REPLY_PHY_CALIBRATION_CMD,
- CMD_SYNC, sizeof(cmd), &cmd);
+ 0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index be98b913ed58..43bef901e8f9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -59,7 +59,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
int ret;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
};
__le32 *status;
@@ -639,7 +639,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = { sizeof(struct iwl_scan_cmd), },
- .flags = CMD_SYNC,
};
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 9cdd91cdf661..6ec86adbe4a1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -39,7 +39,7 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
lockdep_assert_held(&priv->sta_lock);
if (sta_id >= IWLAGN_STATION_COUNT) {
- IWL_ERR(priv, "invalid sta_id %u", sta_id);
+ IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
return -EINVAL;
}
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
@@ -165,7 +165,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
iwl_free_resp(&cmd);
if (cmd.handler_status)
- IWL_ERR(priv, "%s - error in the CMD response %d", __func__,
+ IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
cmd.handler_status);
return cmd.handler_status;
@@ -261,7 +261,7 @@ int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
cmd.station_flags = flags;
cmd.sta.sta_id = sta_id;
- return iwl_send_add_sta(priv, &cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &cmd, 0);
}
static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
@@ -413,7 +413,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
spin_unlock_bh(&priv->sta_lock);
/* Add station to device's station table */
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ ret = iwl_send_add_sta(priv, &sta_cmd, 0);
if (ret) {
spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -456,7 +456,6 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
struct iwl_host_cmd cmd = {
.id = REPLY_REMOVE_STA,
.len = { sizeof(struct iwl_rem_sta_cmd), },
- .flags = CMD_SYNC,
.data = { &rm_sta_cmd, },
};
@@ -740,7 +739,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
send_lq = true;
}
spin_unlock_bh(&priv->sta_lock);
- ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ ret = iwl_send_add_sta(priv, &sta_cmd, 0);
if (ret) {
spin_lock_bh(&priv->sta_lock);
IWL_ERR(priv, "Adding station %pM failed.\n",
@@ -756,8 +755,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* current LQ command
*/
if (send_lq)
- iwl_send_lq_cmd(priv, ctx, &lq,
- CMD_SYNC, true);
+ iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
spin_lock_bh(&priv->sta_lock);
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
}
@@ -968,7 +966,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv,
return -ENOMEM;
}
- ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+ ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
@@ -999,7 +997,6 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
struct iwl_host_cmd cmd = {
.id = ctx->wep_key_cmd,
.data = { wep_cmd, },
- .flags = CMD_SYNC,
};
might_sleep();
@@ -1248,7 +1245,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
int iwl_set_dynamic_key(struct iwl_priv *priv,
@@ -1284,13 +1281,13 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
- seq.tkip.iv32, p1k, CMD_SYNC);
+ seq.tkip.iv32, p1k, 0);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
- 0, NULL, CMD_SYNC);
+ 0, NULL, 0);
break;
default:
IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
@@ -1409,7 +1406,7 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_bh(&priv->sta_lock);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1433,7 +1430,7 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_bh(&priv->sta_lock);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
@@ -1458,7 +1455,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
spin_unlock_bh(&priv->sta_lock);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_send_add_sta(priv, &sta_cmd, 0);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index 058c5892c427..acb981a0a0aa 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -236,7 +236,7 @@ static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
/* make request to retrieve statistics information */
- iwl_send_statistics_request(priv, CMD_SYNC, false);
+ iwl_send_statistics_request(priv, 0, false);
/* Reschedule the ct_kill wait timer */
mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 398dd096674c..3255a1723d17 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -402,10 +402,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
/* aggregation is on for this <sta,tid> */
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
tid_data->agg.state != IWL_AGG_ON) {
- IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
- " Tx flags = 0x%08x, agg.state = %d",
+ IWL_ERR(priv,
+ "TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
info->flags, tid_data->agg.state);
- IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+ IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
sta_id, tid,
IEEE80211_SEQ_TO_SN(tid_data->seq_number));
goto drop_unlock_sta;
@@ -416,7 +416,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
*/
if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
tid_data->agg.state != IWL_AGG_OFF,
- "Tx while agg.state = %d", tid_data->agg.state))
+ "Tx while agg.state = %d\n", tid_data->agg.state))
goto drop_unlock_sta;
seq_number = tid_data->seq_number;
@@ -778,8 +778,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
/* There are no packets for this RA / TID in the HW any more */
if (tid_data->agg.ssn == tid_data->next_reclaimed) {
IWL_DEBUG_TX_QUEUES(priv,
- "Can continue DELBA flow ssn = next_recl ="
- " %d", tid_data->next_reclaimed);
+ "Can continue DELBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
iwl_trans_txq_disable(priv->trans,
tid_data->agg.txq_id);
iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
@@ -791,8 +791,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
/* There are no packets for this RA / TID in the HW any more */
if (tid_data->agg.ssn == tid_data->next_reclaimed) {
IWL_DEBUG_TX_QUEUES(priv,
- "Can continue ADDBA flow ssn = next_recl ="
- " %d", tid_data->next_reclaimed);
+ "Can continue ADDBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
tid_data->agg.state = IWL_AGG_STARTING;
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
}
@@ -1216,8 +1216,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
ctx->vif->type == NL80211_IFTYPE_STATION) {
/* block and stop all queues */
priv->passive_no_rx = true;
- IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
- "passive channel");
+ IWL_DEBUG_TX_QUEUES(priv,
+ "stop all queues: passive channel\n");
ieee80211_stop_queues(priv->hw);
IWL_DEBUG_TX_REPLY(priv,
@@ -1271,7 +1271,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
- ieee80211_tx_status_ni(priv->hw, skb);
+ ieee80211_tx_status(priv->hw, skb);
}
return 0;
@@ -1411,7 +1411,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status_ni(priv->hw, skb);
+ ieee80211_tx_status(priv->hw, skb);
}
return 0;
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index cf03ef5619d9..d5cee1530597 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -172,7 +172,7 @@ static int iwl_send_wimax_coex(struct iwl_priv *priv)
memset(&coex_cmd, 0, sizeof(coex_cmd));
return iwl_dvm_send_cmd_pdu(priv,
- COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
+ COEX_PRIORITY_TABLE_CMD, 0,
sizeof(coex_cmd), &coex_cmd);
}
@@ -205,7 +205,7 @@ void iwl_send_prio_tbl(struct iwl_priv *priv)
memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
sizeof(iwl_bt_prio_tbl));
if (iwl_dvm_send_cmd_pdu(priv,
- REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
+ REPLY_BT_COEX_PRIO_TABLE, 0,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
@@ -218,7 +218,7 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
env_cmd.action = action;
env_cmd.type = type;
ret = iwl_dvm_send_cmd_pdu(priv,
- REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
+ REPLY_BT_COEX_PROT_ENV, 0,
sizeof(env_cmd), &env_cmd);
if (ret)
IWL_ERR(priv, "failed to send BT env command\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 854ba84ccb73..c3817fae16c0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl1000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 3e63323637f3..21e5d0843a62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl2000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6674f2c4541c..332bbede39e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 8048de90233f..8f2c3c8c6b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl6000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 4c2d4ef28b22..48730064da73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,12 +71,12 @@
#define IWL3160_UCODE_API_MAX 9
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 8
-#define IWL3160_UCODE_API_OK 8
+#define IWL7260_UCODE_API_OK 9
+#define IWL3160_UCODE_API_OK 9
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 7
-#define IWL3160_UCODE_API_MIN 7
+#define IWL7260_UCODE_API_MIN 8
+#define IWL3160_UCODE_API_MIN 8
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -98,7 +98,7 @@
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
static const struct iwl_base_params iwl7000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.shadow_ram_support = true,
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .apmg_wake_up_wa = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index f5bd82b88592..51c41531d81d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -83,9 +83,10 @@
#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
+#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin"
static const struct iwl_base_params iwl8000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = IWLAGN_NUM_QUEUES,
.pll_cfg_val = 0,
.shadow_ram_support = true,
@@ -118,6 +119,7 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
};
const struct iwl_cfg iwl8260_n_cfg = {
@@ -127,6 +129,7 @@ const struct iwl_cfg iwl8260_n_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7f37fb86837b..04a483d38659 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,9 +102,7 @@
/* EEPROM */
#define IWLAGN_EEPROM_IMG_SIZE 2048
-/* OTP */
-/* lower blocks contain EEPROM image and calibration data */
-#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
+
/* high blocks contain PAPD data */
#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f17dc3f2c8a..b7047905f41a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ * is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
u32 max_event_log_size;
const bool shadow_reg_enable;
const bool pcie_l1_allowed;
+ const bool apmg_wake_up_wa;
+ const bool scd_chain_ext_wa;
};
/*
@@ -188,6 +193,11 @@ struct iwl_ht_params {
#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
#define EEPROM_REGULATORY_BAND_NO_HT40 0
+/* lower blocks contain EEPROM image and calibration data */
+#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
+
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
bool enhanced_txpower;
@@ -264,6 +274,8 @@ struct iwl_cfg {
u8 nvm_hw_section_num;
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
+ bool no_power_up_nic_in_init;
+ const char *default_nvm_file;
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 8a44f594528d..09feff4fa226 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,8 +61,6 @@
*
*****************************************************************************/
-#define DEBUG
-
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/export.h>
@@ -128,8 +126,8 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
- dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
- function, &vaf);
+ dev_printk(KERN_DEBUG, dev, "%c %s %pV",
+ in_interrupt() ? 'I' : 'U', function, &vaf);
#endif
trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
va_end(args);
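
Note: dropping the local "#define DEBUG" works because dev_dbg() compiles to a no-op unless DEBUG or dynamic debug enables it, while dev_printk(KERN_DEBUG, ...) always emits; the driver's own iwl_have_debug_level() check stays the single gatekeeper. Side by side:

dev_dbg(dev, "gated by DEBUG/dynamic debug\n");		/* may be compiled out */
dev_printk(KERN_DEBUG, dev, "always emitted\n");	/* filtering left to caller */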
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index c8cbdbe15924..295083510e72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,12 +47,32 @@ void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
+/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
+#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
+
/* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
-#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
-#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
+#define IWL_ERR_DEV(d, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_err((d), false, false, f, ## a); \
+ } while (0)
+#define IWL_ERR(m, f, a...) \
+ IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_warn((m)->dev, f, ## a); \
+ } while (0)
+#define IWL_INFO(m, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_info((m)->dev, f, ## a); \
+ } while (0)
+#define IWL_CRIT(m, f, a...) \
+ do { \
+ CHECK_FOR_NEWLINE(f); \
+ __iwl_crit((m)->dev, f, ## a); \
+ } while (0)
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
@@ -72,12 +92,17 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
+#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...) \
+ do { \
+ CHECK_FOR_NEWLINE(fmt); \
+ __iwl_dbg(dev, level, limit, __func__, fmt, ##args); \
+ } while (0)
#define IWL_DEBUG(m, level, fmt, args...) \
- __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+ __IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
- __iwl_dbg((dev), level, false, __func__, fmt, ##args)
+ __IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
- __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
+ __IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
#ifdef CONFIG_IWLWIFI_DEBUG
#define iwl_print_hex_dump(m, level, p, len) \
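
Note: CHECK_FOR_NEWLINE() indexes the character just before a string literal's NUL terminator, turning a missing trailing newline into a build error instead of a runtime log defect; it therefore only works for literal format strings, where sizeof() is known at compile time. Hypothetical call sites:

IWL_ERR(priv, "sta_id out of range\n");	/* f[sizeof(f) - 2] == '\n': builds */
IWL_ERR(priv, "sta_id out of range");	/* BUILD_BUG_ON fires at compile time */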
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 0a3e841b44a9..f2a5c12269a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1243,6 +1243,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.wd_disable = true,
+ .uapsd_disable = false,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1356,6 +1357,10 @@ MODULE_PARM_DESC(wd_disable,
module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
MODULE_PARM_DESC(nvm_file, "NVM file name");
+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
+
/*
* set bt_coex_active to true, uCode will do kill/defer
* every time the priority line is asserted (BT is sending signals on the
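
Usage note: the new parameter takes effect at module load, e.g. "modprobe iwlwifi uapsd_disable=1"; being declared S_IRUGO it is read-only via sysfs afterwards.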
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 58c8941c0d95..2953ffceda38 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -71,10 +71,15 @@
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_SRAM:
* @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ * &struct iwl_fw_error_dump_txcmd packets
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
IWL_FW_ERROR_DUMP_REG = 1,
+ IWL_FW_ERROR_DUMP_RXF = 2,
+ IWL_FW_ERROR_DUMP_TXCMD = 3,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -89,7 +94,7 @@ struct iwl_fw_error_dump_data {
__le32 type;
__le32 len;
__u8 data[];
-} __packed __aligned(4);
+} __packed;
/**
* struct iwl_fw_error_dump_file - the layout of the header of the file
@@ -101,6 +106,29 @@ struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
u8 data[0];
-} __packed __aligned(4);
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+ __le32 cmdlen;
+ __le32 caplen;
+ u8 data[];
+} __packed;
+
+/**
+ * iwl_mvm_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_mvm_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+ return (void *)(data->data + le32_to_cpu(data->len));
+}
#endif /* __fw_error_dump_h__ */
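
Note: a hedged consumer sketch for the two structures and the helper above (the function name is made up): walk the dump file block by block until file_len bytes are consumed.

static void example_walk_dump(struct iwl_fw_error_dump_file *file)
{
	struct iwl_fw_error_dump_data *data = (void *)file->data;
	u8 *end = (u8 *)file + le32_to_cpu(file->file_len);

	while ((u8 *)data + sizeof(*data) <= end) {
		pr_info("dump block: type %u, len %u\n",
			le32_to_cpu(data->type), le32_to_cpu(data->len));
		data = iwl_mvm_fw_error_next_data(data);
	}
}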
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index d14f19339d61..0aa7c0085c9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,29 +74,24 @@
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
- * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
* offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
* @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
* from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- * connection when going back to D0
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- * containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
* @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
* P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ * P2P client interfaces simultaneously if they are in same bindings.
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
* @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -104,22 +99,16 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
- IWL_UCODE_TLV_FLAGS_NEWBT_COEX = BIT(5),
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT = BIT(6),
IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7),
- IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
- IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
- IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12),
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15),
IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16),
- IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17),
- IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19),
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20),
+ IWL_UCODE_TLV_FLAGS_P2P_PM = BIT(21),
IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = BIT(22),
+ IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = BIT(23),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
+ IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
@@ -128,9 +117,11 @@ enum iwl_ucode_tlv_flag {
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
+ IWL_UCODE_TLV_API_CSA_FLOW = BIT(4),
};
/**
@@ -183,6 +174,7 @@ enum iwl_ucode_sec {
#define IWL_UCODE_SECTION_MAX 12
#define IWL_API_ARRAY_SIZE 1
#define IWL_CAPABILITIES_ARRAY_SIZE 1
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
struct iwl_ucode_capabilities {
u32 max_probe_length;
@@ -205,6 +197,11 @@ struct fw_img {
bool is_dual_cpus;
};
+struct iwl_sf_region {
+ u32 addr;
+ u32 size;
+};
+
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
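As a usage note, capability bits like the ones above are tested at runtime before taking the new code paths; a hedged sketch (the helper names are illustrative, the field accesses follow the patterns used elsewhere in this patch):

static bool example_fw_has_ebs(const struct iwl_fw *fw)
{
	/* flag bit introduced above: this uCode image supports EBS */
	return fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT;
}

static bool example_fw_has_csa_flow(const struct iwl_fw *fw)
{
	/* API bit introduced above: unbind-bind flow for CSA */
	return fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW;
}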
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 44cc3cf45762..5eef4ae7333b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,7 @@
#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
+#include "iwl-prph.h"
#include "iwl-fh.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
@@ -183,6 +184,23 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
+void iwl_force_nmi(struct iwl_trans *trans)
+{
+ /*
+ * In hardware earlier than the 8000 HW family, and in the 8000 family
+ * itself when the revision step == 0, the DEVICE_SET_NMI_REG is used
+ * to force an NMI. Otherwise, a different register -
+ * DEVICE_SET_NMI_8000B_REG - is used.
+ */
+ if ((trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) ||
+ ((trans->hw_rev & 0xc) == 0x0))
+ iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL);
+ else
+ iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
+ DEVICE_SET_NMI_8000B_VAL);
+}
+IWL_EXPORT_SYMBOL(iwl_force_nmi);
+
static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 665ddd9dbbc4..705d12c079e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -80,6 +80,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_force_nmi(struct iwl_trans *trans);
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index d994317db85b..d051857729ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -119,6 +119,7 @@ struct iwl_mod_params {
#endif
int ant_coupling;
char *nvm_file;
+ bool uapsd_disable;
};
#endif /* #__iwl_modparams_h__ */
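A sketch of how the new field would typically be exposed as a module parameter in the driver core (the declaration site, permission bits and description text here are assumptions, not taken from this patch):

module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
		   bool, S_IRUGO);
MODULE_PARM_DESC(uapsd_disable,
		 "disable U-APSD functionality (default: N)");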
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 6be30c698506..85eee79c495c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -62,6 +62,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/etherdevice.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
@@ -127,19 +128,20 @@ static const u8 iwl_nvm_channels[] = {
static const u8 iwl_nvm_channels_family_8000[] = {
/* 2.4 GHz */
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165, 169, 173, 177, 181
};
-#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
-#define NUM_2GHZ_CHANNELS 14
-#define FIRST_2GHZ_HT_MINUS 5
-#define LAST_2GHZ_HT_PLUS 9
-#define LAST_5GHZ_HT 161
+#define NUM_2GHZ_CHANNELS 14
+#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
+#define FIRST_2GHZ_HT_MINUS 5
+#define LAST_2GHZ_HT_PLUS 9
+#define LAST_5GHZ_HT 161
#define DEFAULT_MAX_TX_POWER 16
@@ -202,21 +204,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_channel *channel;
u16 ch_flags;
bool is_5ghz;
- int num_of_ch;
+ int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS;
} else {
num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
nvm_chan = &iwl_nvm_channels_family_8000[0];
+ num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ if (ch_idx >= num_2ghz_channels &&
!data->sku_cap_band_52GHz_enable)
ch_flags &= ~NVM_CHANNEL_VALID;
@@ -225,7 +229,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
nvm_chan[ch_idx],
ch_flags,
- (ch_idx >= NUM_2GHZ_CHANNELS) ?
+ (ch_idx >= num_2ghz_channels) ?
"5.2" : "2.4");
continue;
}
@@ -234,7 +238,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+ channel->band = (ch_idx < num_2ghz_channels) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
@@ -242,7 +246,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* TODO: Need to be dependent to the NVM */
channel->flags = IEEE80211_CHAN_NO_HT40;
- if (ch_idx < NUM_2GHZ_CHANNELS &&
+ if (ch_idx < num_2ghz_channels &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +254,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
} else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
(ch_flags & NVM_CHANNEL_40MHZ)) {
- if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+ if ((ch_idx - num_2ghz_channels) % 2 == 0)
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
@@ -447,13 +451,7 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_sec)
{
- u8 hw_addr[ETH_ALEN];
-
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
- memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
- else
- memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
- ETH_ALEN);
+ const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
/* The byte order is little endian 16 bit, meaning 214365 */
data->hw_addr[0] = hw_addr[1];
@@ -464,6 +462,41 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
data->hw_addr[5] = hw_addr[4];
}
+static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ const __le16 *mac_override,
+ const __le16 *nvm_hw)
+{
+ const u8 *hw_addr;
+
+ if (mac_override) {
+ hw_addr = (const u8 *)(mac_override +
+ MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+
+ if (is_valid_ether_addr(hw_addr))
+ return;
+ }
+
+ /* take the MAC address from the OTP */
+ hw_addr = (const u8 *)(nvm_hw + HW_ADDR0_FAMILY_8000);
+ data->hw_addr[0] = hw_addr[3];
+ data->hw_addr[1] = hw_addr[2];
+ data->hw_addr[2] = hw_addr[1];
+ data->hw_addr[3] = hw_addr[0];
+
+ hw_addr = (const u8 *)(nvm_hw + HW_ADDR1_FAMILY_8000);
+ data->hw_addr[4] = hw_addr[1];
+ data->hw_addr[5] = hw_addr[0];
+}
+
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -523,7 +556,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
rx_chains);
} else {
/* MAC address in family 8000 */
- iwl_set_hw_address(cfg, data, mac_override);
+ iwl_set_hw_address_family_8000(cfg, data, mac_override, nvm_hw);
iwl_init_sbands(dev, cfg, data, regulatory,
sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
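The recurring "214365" comment describes the NVM's little-endian 16-bit word order; written as a loop, the same swizzle looks like the sketch below (the helper name is illustrative only):

/* Copy a MAC address stored as three little-endian 16-bit words,
 * so the bytes arrive in 2-1-4-3-6-5 order. */
static void example_copy_swapped_addr(u8 *dst, const u8 *src)
{
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		dst[i] = src[i + 1];
		dst[i + 1] = src[i];
	}
}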
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ea29504ac617..99785c892f96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -63,6 +63,7 @@
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__
+#include <linux/netdevice.h>
#include <linux/debugfs.h>
struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- * HCMD this Rx responds to.
- * This callback may sleep, it is called from a threaded IRQ handler.
+ * HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ * but the higher layers need to know about it (in particular mac80211, to
+ * be able to call the right NAPI RX functions); this function is needed
+ * to eventually call netif_napi_add() with higher layer involvement.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
void (*stop)(struct iwl_op_mode *op_mode);
int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+ void (*napi_add)(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
{
- might_sleep();
return op_mode->ops->rx(op_mode, rxb, cmd);
}
@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
return op_mode->ops->exit_d0i3(op_mode);
}
+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ if (!op_mode->ops->napi_add)
+ return;
+ op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
#endif /* __iwl_op_mode_h__ */
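A minimal sketch of the op_mode side of the new hook, assuming (per the @napi_add kdoc above) that it eventually forwards to netif_napi_add(); the function name is hypothetical:

static void example_napi_add(struct iwl_op_mode *op_mode,
			     struct napi_struct *napi,
			     struct net_device *napi_dev,
			     int (*poll)(struct napi_struct *, int),
			     int weight)
{
	/* hand the transport's NAPI context to the networking core */
	netif_napi_add(napi_dev, napi, poll, weight);
}

The transport would then invoke it through iwl_op_mode_napi_add() once its RX path is set up.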
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index b761ac4822a3..d4fb5cad07ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -345,7 +345,6 @@ static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
struct iwl_phy_db_cmd phy_db_cmd;
struct iwl_host_cmd cmd = {
.id = PHY_DB_CMD,
- .flags = CMD_SYNC,
};
IWL_DEBUG_INFO(phy_db->trans,
@@ -393,13 +392,13 @@ static int iwl_phy_db_send_all_channel_groups(
entry->data);
if (err) {
IWL_ERR(phy_db->trans,
- "Can't SEND phy_db section %d (%d), err %d",
+ "Can't SEND phy_db section %d (%d), err %d\n",
type, i, err);
return err;
}
IWL_DEBUG_INFO(phy_db->trans,
- "Sent PHY_DB HCMD, type = %d num = %d",
+ "Sent PHY_DB HCMD, type = %d num = %d\n",
type, i);
}
@@ -451,7 +450,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
IWL_NUM_PAPD_CH_GROUPS);
if (err) {
IWL_ERR(phy_db->trans,
- "Cannot send channel specific PAPD groups");
+ "Cannot send channel specific PAPD groups\n");
return err;
}
@@ -461,7 +460,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
IWL_NUM_TXP_CH_GROUPS);
if (err) {
IWL_ERR(phy_db->trans,
- "Cannot send channel specific TX power groups");
+ "Cannot send channel specific TX power groups\n");
return err;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5f657c501406..4997e27672b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -105,6 +105,9 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
+#define DEVICE_SET_NMI_VAL 0x1
+#define DEVICE_SET_NMI_8000B_REG 0x00a01c24
+#define DEVICE_SET_NMI_8000B_VAL 0x1000000
/* Shared registers (0x0..0x3ff, via target indirect or periphery */
#define SHR_BASE 0x00a10000
@@ -348,4 +351,12 @@ enum secure_load_status_reg {
#define LMPM_SECURE_TIME_OUT (100)
+/* Rx FIFO */
+#define RXF_SIZE_ADDR (0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS (7)
+#define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c)
+
#endif /* __iwl_prph_h__ */
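For illustration, extracting the byte count from the new RXF register with the mask/position pair above (a sketch; iwl_read_prph() is the read counterpart of the iwl_write_prph() calls seen earlier in this patch):

static u32 example_rxf_byte_count(struct iwl_trans *trans)
{
	u32 val = iwl_read_prph(trans, RXF_SIZE_ADDR);

	return (val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
}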
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8cdb0dd618a6..34d49e171fb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -189,10 +189,9 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
/**
* enum CMD_MODE - how to send the host commands ?
*
- * @CMD_SYNC: The caller will be stalled until the fw responds to the command
* @CMD_ASYNC: Return right away and don't wait for the response
- * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
- * response. The caller needs to call iwl_free_resp when done.
+ * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the response
+ * buffer and must call iwl_free_resp() when done with it.
* @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
* command queue, but after other high priority commands. valid only
* with CMD_ASYNC.
@@ -202,7 +201,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* (i.e. mark it as non-idle).
*/
enum CMD_MODE {
- CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
@@ -427,7 +425,7 @@ struct iwl_trans;
* @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
* If RFkill is asserted in the middle of a SYNC host command, it must
* return -ERFKILL straight away.
- * May sleep only if CMD_SYNC is set
+ * May sleep only if CMD_ASYNC is not set
* @tx: send an skb
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
@@ -437,8 +435,7 @@ struct iwl_trans;
* this one. The op_mode must not configure the HCMD queue. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
- * @wait_tx_queue_empty: wait until all tx queues are empty
- * May sleep
+ * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @write8: write a u8 to a register at offset ofs from the BAR
@@ -464,6 +461,11 @@ struct iwl_trans;
* @unref: release a reference previously taken with @ref. Note that
* initially the reference count is 1, making an initial @unref
* necessary to allow low power states.
+ * @dump_data: fill a data dump with debug data, possibly containing last
+ * TX'ed commands and similar. When called with a NULL buffer and
+ * zero buffer length, provide only the (estimated) required buffer
+ * length. Return the used buffer length.
+ * Note that the transport must fill in the proper file headers.
*/
struct iwl_trans_ops {
@@ -471,6 +473,8 @@ struct iwl_trans_ops {
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
+ int (*update_sf)(struct iwl_trans *trans,
+ struct iwl_sf_region *st_fwrd_space);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
@@ -490,7 +494,7 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
- int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+ int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -512,6 +516,10 @@ struct iwl_trans_ops {
u32 value);
void (*ref)(struct iwl_trans *trans);
void (*unref)(struct iwl_trans *trans);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ u32 (*dump_data)(struct iwl_trans *trans, void *buf, u32 buflen);
+#endif
};
/**
@@ -630,6 +638,17 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
+static inline int iwl_trans_update_sf(struct iwl_trans *trans,
+ struct iwl_sf_region *st_fwrd_space)
+{
+ might_sleep();
+
+ if (trans->ops->update_sf)
+ return trans->ops->update_sf(trans, st_fwrd_space);
+
+ return 0;
+}
+
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
@@ -665,6 +684,16 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
trans->ops->unref(trans);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline u32 iwl_trans_dump_data(struct iwl_trans *trans,
+ void *buf, u32 buflen)
+{
+ if (!trans->ops->dump_data)
+ return 0;
+ return trans->ops->dump_data(trans, buf, buflen);
+}
+#endif
+
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
@@ -678,7 +707,7 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
return -EIO;
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return -EIO;
}
@@ -720,7 +749,7 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -EIO;
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
return trans->ops->tx(trans, skb, dev_cmd, queue);
}
@@ -729,7 +758,7 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
int ssn, struct sk_buff_head *skbs)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
trans->ops->reclaim(trans, queue, ssn, skbs);
}
@@ -746,7 +775,7 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
might_sleep();
if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
@@ -759,12 +788,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
+ u32 txq_bm)
{
if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
- IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+ IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return trans->ops->wait_tx_queue_empty(trans);
+ return trans->ops->wait_tx_queue_empty(trans, txq_bm);
}
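Per the @dump_data kdoc earlier in this file, callers probe for the required size first and then collect the dump; a hedged sketch of that two-pass pattern (the helper and its error handling are illustrative):

#ifdef CONFIG_IWLWIFI_DEBUGFS
static void *example_collect_dump(struct iwl_trans *trans, u32 *len)
{
	void *buf;

	/* first pass: NULL buffer, zero length -> estimated size only */
	*len = iwl_trans_dump_data(trans, NULL, 0);
	if (!*len)
		return NULL;

	buf = kzalloc(*len, GFP_KERNEL);
	if (!buf)
		return NULL;

	/* second pass: fill the buffer; returns the bytes actually used */
	*len = iwl_trans_dump_data(trans, buf, *len);
	return buf;
}
#endif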
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ccdd3b7c4cce..c30d7f64ec1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -3,8 +3,9 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o coex.o
-iwlmvm-y += led.o tt.o offloading.o
+iwlmvm-y += tt.o offloading.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
+iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 0489314425cb..c8c3b38228f0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -104,12 +104,9 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65)
#define BT_ANTENNA_COUPLING_THRESHOLD (30)
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
sizeof(struct iwl_bt_coex_prio_tbl_cmd),
&iwl_bt_prio_tbl);
}
@@ -127,10 +124,10 @@ const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
};
static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
- cpu_to_le32(0xf0f0f0f0),
- cpu_to_le32(0xc0c0c0c0),
- cpu_to_le32(0xfcfcfcfc),
- cpu_to_le32(0xff00ff00),
+ cpu_to_le32(0xf0f0f0f0), /* 50% */
+ cpu_to_le32(0xc0c0c0c0), /* 25% */
+ cpu_to_le32(0xfcfcfcfc), /* 75% */
+ cpu_to_le32(0xfefefefe), /* 87.5% */
};
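The new percentage comments correspond to the fraction of set bits in each 32-bit boost pattern; a one-line sanity check of that arithmetic, as a sketch:

/* hweight32() counts set bits: hweight32(0xc0c0c0c0) == 8 -> 8/32 = 25% */
unsigned int pct = hweight32(0xfefefefe) * 100 / 32;	/* 87 (i.e. 87.5%) */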
static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
@@ -303,8 +300,8 @@ static const __le64 iwl_ci_mask[][3] = {
};
static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
- cpu_to_le32(0x22002200),
- cpu_to_le32(0x33113311),
+ cpu_to_le32(0x28412201),
+ cpu_to_le32(0x11118451),
};
struct corunning_block_luts {
@@ -568,13 +565,13 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
int ret;
u32 flags;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return 0;
+ ret = iwl_send_bt_prio_tbl(mvm);
+ if (ret)
+ return ret;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
@@ -582,10 +579,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
- bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
- bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
- bt_cmd->bt4_tx_rx_max_freq0 = 15,
+ bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ bt_cmd->bt4_tx_rx_max_freq0 = 15;
+ bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+ bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
@@ -663,7 +662,6 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
.data[0] = &bt_cmd,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
int ret = 0;
@@ -717,7 +715,8 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+ bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
@@ -735,8 +734,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
return 0;
/* nothing to do */
- if (mvmsta->bt_reduced_txpower_dbg ||
- mvmsta->bt_reduced_txpower == enable)
+ if (mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -803,23 +801,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
+ /* count BSS vifs */
+ data->num_bss_ifaces++;
/* default smps_mode for BSS / P2P client is AUTOMATIC */
smps_mode = IEEE80211_SMPS_AUTOMATIC;
- data->num_bss_ifaces++;
-
- /*
- * Count unassoc BSSes, relax SMSP constraints
- * and disable reduced Tx Power
- */
- if (!vif->bss_conf.assoc) {
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- if (iwl_mvm_bt_coex_reduced_txp(mvm,
- mvmvif->ap_sta_id,
- false))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
- return;
- }
break;
case NL80211_IFTYPE_AP:
/* default smps_mode for AP / GO is OFF */
@@ -845,8 +830,12 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* ... relax constraints and disable rssi events */
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
- if (vif->type == NL80211_IFTYPE_STATION)
+ data->reduced_tx_power = false;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ false);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ }
return;
}
@@ -857,6 +846,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = vif->type == NL80211_IFTYPE_AP ?
IEEE80211_SMPS_OFF :
IEEE80211_SMPS_DYNAMIC;
+
+ /* relax SMPS constraints for next association */
+ if (!vif->bss_conf.assoc)
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
IWL_DEBUG_COEX(data->mvm,
"mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
mvmvif->id, data->notif->bt_status, bt_activity_grading,
@@ -903,22 +897,18 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
- /* don't reduce the Tx power if in loose scheme */
+ /*
+ * don't reduce the Tx power if one of these is true:
+ * we are in LOOSE
+ * single shared antenna product
+ * BT is not active
+ * we are not associated
+ */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant) {
- data->reduced_tx_power = false;
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
- return;
- }
-
- /* reduced Txpower only if BT is on, so ...*/
- if (!data->notif->bt_status) {
- /* ... cancel reduced Tx power ... */
- if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+ !data->notif->bt_status) {
data->reduced_tx_power = false;
-
- /* ... and there is no need to get reports on RSSI any more. */
+ iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
@@ -1022,9 +1012,9 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
/* Don't spam the fw with the same command over and over */
if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
- if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
sizeof(cmd), &cmd))
- IWL_ERR(mvm, "Failed to send BT_CI cmd");
+ IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
}
@@ -1039,7 +1029,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
}
-/* upon association, the fw will send in BT Coex notification */
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
@@ -1215,6 +1204,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
+{
+ u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+ if (band != IEEE80211_BAND_2GHZ)
+ return false;
+
+ return bt_activity >= BT_LOW_TRAFFIC;
+}
+
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
@@ -1249,9 +1249,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- return;
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
@@ -1270,7 +1267,6 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
if (!IWL_MVM_BT_COEX_CORUNNING)
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index e56f5a0edf85..645b3cfc29a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -193,8 +193,7 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
wkc.wep_key.key_offset = data->wep_key_idx;
}
- ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
- sizeof(wkc), &wkc);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
data->error = ret != 0;
mvm->ptk_ivlen = key->iv_len;
@@ -341,7 +340,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
};
int i, err;
@@ -518,7 +516,6 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
.id = REMOTE_WAKE_CONFIG_CMD,
.len = { sizeof(*cfg), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
- .flags = CMD_SYNC,
};
int ret;
@@ -666,10 +663,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (WARN_ON(!vif->bss_conf.assoc))
return -EINVAL;
- /* hack */
- vif->bss_conf.assoc = false;
+
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
- vif->bss_conf.assoc = true;
if (ret)
return ret;
@@ -705,7 +700,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
if (ret)
return ret;
@@ -719,7 +714,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 1; i < MAX_BINDINGS; i++)
quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
sizeof(quota_cmd), &quota_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
@@ -739,15 +734,13 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
};
struct iwl_host_cmd cmd = {
.id = NON_QOS_TX_COUNTER_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
};
int err;
u32 size;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
- cmd.data[0] = &query_cmd;
- cmd.len[0] = sizeof(query_cmd);
- }
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
err = iwl_mvm_send_cmd(mvm, &cmd);
if (err)
@@ -758,10 +751,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
err = -EINVAL;
} else {
err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
- /* new API returns next, not last-used seqno */
- if (mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- err = (u16) (err - 0x10);
+ /* firmware returns the next, not the last-used seqno; subtracting
+ * 0x10 steps back one sequence number, since the low four bits of
+ * the sequence control field hold the fragment number */
+ err = (u16) (err - 0x10);
}
iwl_free_resp(&cmd);
@@ -785,11 +776,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->seqno_valid = false;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
- return;
-
- if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
sizeof(query_cmd), &query_cmd))
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
@@ -804,7 +791,7 @@ iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
cmd_len = sizeof(*cmd);
- return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
cmd_len, cmd);
}
@@ -833,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
struct iwl_host_cmd d3_cfg_cmd = {
.id = D3_CONFIG_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
@@ -983,7 +970,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (key_data.use_rsc_tsc) {
struct iwl_host_cmd rsc_tsc_cmd = {
.id = WOWLAN_TSC_RSC_PARAM,
- .flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.len[0] = sizeof(*key_data.rsc_tsc),
@@ -997,7 +983,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (key_data.use_tkip) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
- CMD_SYNC, sizeof(tkip_cmd),
+ 0, sizeof(tkip_cmd),
&tkip_cmd);
if (ret)
goto out;
@@ -1014,8 +1000,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
ret = iwl_mvm_send_cmd_pdu(mvm,
- WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC,
+ WOWLAN_KEK_KCK_MATERIAL, 0,
sizeof(kek_kck_cmd),
&kek_kck_cmd);
if (ret)
@@ -1031,7 +1016,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
+ ret = iwl_mvm_send_proto_offload(mvm, vif, false, 0);
if (ret)
goto out;
@@ -1043,7 +1028,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
if (ret)
goto out;
@@ -1082,6 +1067,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_is_d0i3_supported(mvm)) {
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+
return __iwl_mvm_suspend(hw, wowlan, false);
}
@@ -1277,7 +1271,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
}
static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
- struct iwl_wowlan_status_v6 *status)
+ struct iwl_wowlan_status *status)
{
union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
@@ -1294,7 +1288,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
}
struct iwl_mvm_d3_gtk_iter_data {
- struct iwl_wowlan_status_v6 *status;
+ struct iwl_wowlan_status *status;
void *last_gtk;
u32 cipher;
bool find_phase, unhandled_cipher;
@@ -1370,7 +1364,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status_v6 *status)
+ struct iwl_wowlan_status *status)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1465,10 +1459,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
} err_info;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
- .flags = CMD_SYNC | CMD_WANT_SKB,
+ .flags = CMD_WANT_SKB,
};
struct iwl_wowlan_status_data status;
- struct iwl_wowlan_status_v6 *status_v6;
+ struct iwl_wowlan_status *fw_status;
int ret, len, status_size, i;
bool keep;
struct ieee80211_sta *ap_sta;
@@ -1491,7 +1485,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
}
/* only for tracing for now */
- ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
@@ -1505,10 +1499,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (!cmd.resp_pkt)
goto out_unlock;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- status_size = sizeof(struct iwl_wowlan_status_v6);
- else
- status_size = sizeof(struct iwl_wowlan_status_v4);
+ status_size = sizeof(*fw_status);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len < status_size) {
@@ -1516,35 +1507,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
goto out_free_resp;
}
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
- status_v6 = (void *)cmd.resp_pkt->data;
-
- status.pattern_number = le16_to_cpu(status_v6->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(status_v6->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(status_v6->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(status_v6->wake_packet_bufsize);
- status.wake_packet = status_v6->wake_packet;
- } else {
- struct iwl_wowlan_status_v4 *status_v4;
- status_v6 = NULL;
- status_v4 = (void *)cmd.resp_pkt->data;
-
- status.pattern_number = le16_to_cpu(status_v4->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(status_v4->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(status_v4->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(status_v4->wake_packet_bufsize);
- status.wake_packet = status_v4->wake_packet;
- }
+ fw_status = (void *)cmd.resp_pkt->data;
+
+ status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+ for (i = 0; i < 8; i++)
+ status.qos_seq_ctr[i] =
+ le16_to_cpu(fw_status->qos_seq_ctr[i]);
+ status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+ status.wake_packet_length =
+ le32_to_cpu(fw_status->wake_packet_length);
+ status.wake_packet_bufsize =
+ le32_to_cpu(fw_status->wake_packet_bufsize);
+ status.wake_packet = fw_status->wake_packet;
if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1545,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
- keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
iwl_free_resp(&cmd);
return keep;
@@ -1674,6 +1648,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ if (iwl_mvm_is_d0i3_supported(mvm)) {
+ bool exit_now;
+
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+ exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+ &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ if (exit_now)
+ _iwl_mvm_exit_d0i3(mvm);
+ return 0;
+ }
+
return __iwl_mvm_resume(mvm, false);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 9b59e1d7ae71..2e90ff795c13 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
dbgfs_pm->tx_data_timeout = val;
break;
- case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
- IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
- dbgfs_pm->disable_power_off = val;
- break;
case MVM_DEBUGFS_PM_LPRX_ENA:
IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
if (sscanf(buf + 16, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
- } else if (!strncmp("disable_power_off=", buf, 18) &&
- !(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
- if (sscanf(buf + 18, "%d", &val) != 1)
- return -EINVAL;
- param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
} else if (!strncmp("lprx=", buf, 5)) {
if (sscanf(buf + 5, "%d", &val) != 1)
return -EINVAL;
@@ -185,7 +175,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -272,10 +262,9 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d force %d\n",
+ "ap_sta_id %d - reduced Tx power %d\n",
ap_sta_id,
- mvm_sta->bt_reduced_txpower,
- mvm_sta->bt_reduced_txpower_dbg);
+ mvm_sta->bt_reduced_txpower);
}
}
@@ -293,41 +282,6 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct iwl_mvm_sta *mvmsta;
- bool reduced_tx_power;
- int ret;
-
- if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
- return -ENOTCONN;
-
- if (strtobool(buf, &reduced_tx_power) != 0)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
- if (IS_ERR_OR_NULL(mvmsta)) {
- mutex_unlock(&mvm->mutex);
- return -ENOTCONN;
- }
-
- mvmsta->bt_reduced_txpower_dbg = false;
- ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
- reduced_tx_power);
- if (!ret)
- mvmsta->bt_reduced_txpower_dbg = true;
-
- mutex_unlock(&mvm->mutex);
-
- return ret ? : count;
-}
-
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -462,9 +416,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -568,7 +522,6 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -592,8 +545,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
- iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
@@ -601,7 +553,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 1b52deea6081..29ca72695eaa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -65,9 +65,8 @@
#include "mvm.h"
#include "sta.h"
#include "iwl-io.h"
-#include "iwl-prph.h"
#include "debugfs.h"
-#include "fw-error-dump.h"
+#include "iwl-fw-error-dump.h"
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -136,9 +135,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
file->private_data = mvm->fw_error_dump;
mvm->fw_error_dump = NULL;
- kfree(mvm->fw_error_sram);
- mvm->fw_error_sram = NULL;
- mvm->fw_error_sram_len = 0;
ret = 0;
out:
@@ -684,7 +680,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
mvm->restart_fw++;
/* take the return value to make compiler happy - it will fail anyway */
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
mutex_unlock(&mvm->mutex);
@@ -694,7 +690,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- iwl_write_prph(mvm->trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(mvm->trans);
return count;
}
@@ -841,7 +837,7 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
/* send updated bcast filtering configuration */
if (mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
mutex_unlock(&mvm->mutex);
@@ -913,7 +909,7 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
/* send updated bcast filtering configuration */
if (mvm->dbgfs_bcast_filtering.override &&
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
mutex_unlock(&mvm->mutex);
@@ -1004,6 +1000,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
+ PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1108,9 +1105,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
- .open = iwl_dbgfs_fw_error_dump_open,
- .read = iwl_dbgfs_fw_error_dump_read,
- .release = iwl_dbgfs_fw_error_dump_release,
+ .open = iwl_dbgfs_fw_error_dump_open,
+ .read = iwl_dbgfs_fw_error_dump_read,
+ .release = iwl_dbgfs_fw_error_dump_release,
};
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1135,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
- MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
- S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+ S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index 21877e5966a8..5fe82c29c8ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
BT_COEX_TX_DIS_LUT,
BT_COEX_MAX_LUT,
-};
+ BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
* @flags:&enum iwl_bt_coex_flags
* @max_kill:
* @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: set to BT_COEX_INVALID_LUT
+ * by default (no override)
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: set to BT_COEX_INVALID_LUT
+ * by default (no override)
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
* @wifi_tx_prio_boost: SW boost of wifi tx priority
* @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
* @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
*
* The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
__le32 flags;
u8 max_kill;
u8 bt_reduced_tx_power;
- u8 reserved[2];
+ u8 override_primary_lut;
+ u8 override_secondary_lut;
u8 bt4_antenna_isolation;
u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
__le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
__le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+} __packed; /* BT_COEX_CMD_API_S_VER_5 */
/**
* struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
BT_ON_NO_CONNECTION = 1,
BT_LOW_TRAFFIC = 2,
BT_HIGH_TRAFFIC = 3,
-};
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
/**
* struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
__le32 primary_ch_lut;
__le32 secondary_ch_lut;
__le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
enum iwl_bt_coex_prio_table_event {
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 10fcc1a79ebd..13696fe419b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_status_v4 {
- __le64 replay_ctr;
- __le16 pattern_number;
- __le16 non_qos_seq_ctr;
- __le16 qos_seq_ctr[8];
- __le32 wakeup_reasons;
- __le32 rekey_status;
- __le32 num_of_gtk_rekeys;
- __le32 transmitted_ndps;
- __le32 received_beacons;
- __le32 wake_packet_length;
- __le32 wake_packet_bufsize;
- u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
-
struct iwl_wowlan_gtk_status {
u8 key_index;
u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed;
-struct iwl_wowlan_status_v6 {
+struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
__le64 replay_ctr;
__le16 pattern_number;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index 39148b5bb332..8bb5b94bf963 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -334,7 +334,7 @@ enum {
*/
struct iwl_lq_cmd {
u8 sta_id;
- u8 reserved1;
+ u8 reduced_tpc;
u16 control;
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
u8 flags;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d73a89ecd78a..6959fda3fe09 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -169,8 +169,12 @@ enum iwl_scan_type {
SCAN_TYPE_DISCOVERY_FORCED = 6,
}; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
-/* Maximal number of channels to scan */
-#define MAX_NUM_SCAN_CHANNELS 0x24
+/**
+ * Maximal number of channels to scan;
+ * it should be equal to
+ * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+ */
+#define MAX_NUM_SCAN_CHANNELS 50
/**
* struct iwl_scan_cmd - scan request command
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
*
* IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
* IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
- * on A band.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100 ms - a typical
+ * beacon period. Finding channel activity in this mode is not guaranteed.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200 ms.
+ * Assuming a beacon period of 100 ms, finding channel activity is guaranteed.
*/
enum iwl_scan_offload_flags {
IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
- IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3),
+ IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
+ IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
};
/**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
IWL_SCAN_OFFLOAD_ABORTED = 2,
};
+enum iwl_scan_ebs_status {
+ IWL_SCAN_EBS_SUCCESS,
+ IWL_SCAN_EBS_FAILED,
+ IWL_SCAN_EBS_CHAN_NOT_FOUND,
+};
+
/**
* iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: enum iwl_scan_offload_compleate_status
+ * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
*/
struct iwl_scan_offload_complete {
u8 last_schedule_line;
u8 last_schedule_iteration;
u8 status;
- u8 reserved;
+ u8 ebs_status;
} __packed;
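A sketch of how the new ebs_status field might be consumed when the completion notification arrives (the handler name and debug message are illustrative, not part of this patch):

static void example_scan_complete(struct iwl_mvm *mvm,
				  struct iwl_scan_offload_complete *notif)
{
	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS)
		IWL_DEBUG_SCAN(mvm, "EBS status %d\n", notif->ebs_status);
}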
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index d63647867262..39cebee8016f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
} __packed;
/**
- * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- * sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to
* @addr[ETH_ALEN]: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
- * @key: look at %iwl_mvm_keyinfo
* @station_flags: look at %iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwl_mvm_add_sta_cmd_v5 {
- u8 add_modify;
- u8 unicast_tx_key_id;
- u8 multicast_tx_key_id;
- u8 reserved1;
- __le32 mac_id_n_color;
- u8 addr[ETH_ALEN];
- __le16 reserved2;
- u8 sta_id;
- u8 modify_mask;
- __le16 reserved3;
- struct iwl_mvm_keyinfo key;
- __le32 station_flags;
- __le32 station_flags_msk;
- __le16 tid_disable_tx;
- __le16 reserved4;
- u8 add_immediate_ba_tid;
- u8 remove_immediate_ba_tid;
- __le16 add_immediate_ba_ssn;
- __le16 sleep_tx_count;
- __le16 sleep_state_flags;
- __le16 assoc_id;
- __le16 beamform_flags;
- __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
- * VER_7 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- * It only differs from VER_6 by the "awake_acs" field that is
- * reserved and ignored in VER_6.
- */
-struct iwl_mvm_add_sta_cmd_v7 {
+struct iwl_mvm_add_sta_cmd {
u8 add_modify;
u8 awake_acs;
__le16 tid_disable_tx;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 8e122f3a7a74..6cc5f52b807f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
u8 pa_integ_res_b[3];
u8 pa_integ_res_c[3];
__le16 measurement_req_id;
- __le16 reserved;
+ u8 reduced_tpc;
+ u8 reserved;
__le32 tfd_info;
__le16 seq_ctl;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 6e75b52588de..309a9b9a94fe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -71,6 +71,7 @@
#include "fw-api-power.h"
#include "fw-api-d3.h"
#include "fw-api-coex.h"
+#include "fw-api-scan.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
@@ -604,52 +605,7 @@ enum {
TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
-
-/**
- * struct iwl_time_event_cmd_api_v1 - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
- * with version 2. determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
- * @id: this field has two meanings, depending on the action:
- * If the action is ADD, then it means the type of event to add.
- * For all other actions it is the unique event ID assigned when the
- * event was added by the FW.
- * @apply_time: When to start the Time Event (in GP2)
- * @max_delay: maximum delay to event's start (apply time), in TU
- * @depends_on: the unique ID of the event we depend on (if any)
- * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
- * @duration: duration of event in TU
- * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
- * and TE_V1_EVENT_SOCIOPATHIC
- * @is_present: 0 or 1, are we present or absent during the Time Event
- * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
- */
-struct iwl_time_event_cmd_v1 {
- /* COMMON_INDEX_HDR_API_S_VER_1 */
- __le32 id_and_color;
- __le32 action;
- __le32 id;
- /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
- __le32 apply_time;
- __le32 max_delay;
- __le32 dep_policy;
- __le32 depends_on;
- __le32 is_present;
- __le32 max_frags;
- __le32 interval;
- __le32 interval_reciprocal;
- __le32 duration;
- __le32 repeat;
- __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
-
-
-/* Time event - defines for command API v2 */
+/* Time event - defines for command API */
/*
* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
#define TE_V2_PLACEMENT_POS 12
#define TE_V2_ABSENCE_POS 15
-/* Time event policy values (for time event cmd api v2)
+/* Time event policy values
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
};
/**
- * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * struct iwl_time_event_cmd - configuring Time Events
 * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
 * version 1, determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
* TE_EVENT_SOCIOPATHIC
* using TE_ABSENCE and using TE_NOTIF_*
*/
-struct iwl_time_event_cmd_v2 {
+struct iwl_time_event_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 7ce20062f32d..883e702152d5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -99,7 +99,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
};
IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
@@ -137,6 +137,8 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
mvm->umac_error_event_table =
le32_to_cpu(palive2->error_info_addr);
+ mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
+ mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
alive_data->valid = le16_to_cpu(palive2->status) ==
IWL_ALIVE_STATUS_OK;
@@ -180,6 +182,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
int ret, i;
enum iwl_ucode_type old_type = mvm->cur_ucode;
static const u8 alive_cmd[] = { MVM_ALIVE };
+ struct iwl_sf_region st_fwrd_space;
fw = iwl_get_ucode_image(mvm, ucode_type);
if (WARN_ON(!fw))
@@ -215,6 +218,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
+ /*
+ * update the sdio allocation according to the pointer we get in the
+ * alive notification.
+ */
+ st_fwrd_space.addr = mvm->sf_space.addr;
+ st_fwrd_space.size = mvm->sf_space.size;
+ ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
+
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
@@ -256,7 +267,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
phy_cfg_cmd.phy_cfg);
- return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
@@ -288,14 +299,14 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}
- ret = iwl_send_bt_prio_tbl(mvm);
+ ret = iwl_send_bt_init_conf(mvm);
if (ret)
goto error;
/* Read the NVM only at driver load time, no need to do this twice */
if (read_nvm) {
/* Read nvm */
- ret = iwl_nvm_init(mvm);
+ ret = iwl_nvm_init(mvm, true);
if (ret) {
IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
goto error;
@@ -303,7 +314,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
}
/* In case we read the NVM from external file, load it to the NIC */
- if (iwlwifi_mod_params.nvm_file)
+ if (mvm->nvm_file_name)
iwl_mvm_load_nvm_to_nic(mvm);
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
@@ -424,10 +435,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
- ret = iwl_send_bt_prio_tbl(mvm);
- if (ret)
- goto error;
-
ret = iwl_send_bt_init_conf(mvm);
if (ret)
goto error;
@@ -468,12 +475,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- ret = iwl_power_legacy_set_cam_mode(mvm);
- if (ret)
- goto error;
- }
-
ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 9ccec10bba16..8b5302777632 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- /* Don't use cts to self as the fw doesn't support it currently. */
if (vif->bss_conf.use_cts_prot) {
cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- cmd->protection_flags |=
- cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
}
IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
vif->bss_conf.use_cts_prot,
@@ -688,7 +685,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
struct iwl_mac_ctx_cmd *cmd)
{
- int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
sizeof(*cmd), cmd);
if (ret)
IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
@@ -696,19 +693,39 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
return ret;
}
-/*
- * Fill the specific data for mac context of type station or p2p client
- */
-static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_mac_data_sta *ctxt_sta,
- bool force_assoc_off)
+static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off)
{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mac_data_sta *ctxt_sta;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ if (vif->p2p) {
+ struct ieee80211_p2p_noa_attr *noa =
+ &vif->bss_conf.p2p_noa_attr;
+
+ cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+ ctxt_sta = &cmd.p2p_sta.sta;
+ } else {
+ ctxt_sta = &cmd.sta;
+ }
+
/* We need the dtim_period to set the MAC as associated */
if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) {
u32 dtim_offs;
+ /* Allow beacons to pass through as long as we are not
+ * associated, or we do not have dtim period information.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+
/*
* The DTIM count counts down, so when it is N that means N
* more beacon intervals happen until the DTIM TBTT. Therefore
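Worked out, the countdown the comment describes is plain arithmetic: with dtim_count == N and a beacon interval of beacon_int TUs (1 TU = 1024 usec), the next DTIM TBTT lies N * beacon_int TUs past the last synced beacon. The helper below only illustrates that math; the field names are assumed and it is not part of the patch.

/* Illustrative only: GP2/TSF time of the next DTIM TBTT, given the
 * last synced beacon time and a down-counting DTIM count. */
static u64 next_dtim_tsf(u64 sync_tsf, u8 dtim_count, u16 beacon_int)
{
	u64 dtim_offs = (u64)dtim_count * beacon_int;	/* in TU */

	dtim_offs *= 1024;				/* TU -> usec */
	return sync_tsf + dtim_offs;
}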
@@ -755,51 +772,6 @@ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
-}
-
-static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 action)
-{
- struct iwl_mac_ctx_cmd cmd = {};
-
- WARN_ON(vif->type != NL80211_IFTYPE_STATION || vif->p2p);
-
- /* Fill the common data for all mac context types */
- iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
-
- /* Allow beacons to pass through as long as we are not associated,or we
- * do not have dtim period information */
- if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
- cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
- else
- cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
-
- /* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
- action == FW_CTXT_ACTION_ADD);
-
- return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
-}
-
-static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 action)
-{
- struct iwl_mac_ctx_cmd cmd = {};
- struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
-
- WARN_ON(vif->type != NL80211_IFTYPE_STATION || !vif->p2p);
-
- /* Fill the common data for all mac context types */
- iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
-
- /* Fill the data specific for station mode */
- iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
- action == FW_CTXT_ACTION_ADD);
-
- cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
- IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -1137,16 +1109,12 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
}
static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u32 action)
+ u32 action, bool force_assoc_off)
{
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (!vif->p2p)
- return iwl_mvm_mac_ctxt_cmd_station(mvm, vif,
- action);
- else
- return iwl_mvm_mac_ctxt_cmd_p2p_client(mvm, vif,
- action);
+ return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
+ force_assoc_off);
break;
case NL80211_IFTYPE_AP:
if (!vif->p2p)
@@ -1176,7 +1144,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
- ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD);
+ ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+ true);
if (ret)
return ret;
@@ -1187,7 +1156,8 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return 0;
}
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1195,7 +1165,8 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
- return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY);
+ return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+ force_assoc_off);
}
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -1214,7 +1185,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->color));
cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
- ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
sizeof(cmd), &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
@@ -1240,11 +1211,23 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
u32 rate __maybe_unused =
le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le64_to_cpu(beacon->tsf),
rate);
+
+ if (unlikely(mvm->csa_vif && mvm->csa_vif->csa_active)) {
+ if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
+ } else {
+ ieee80211_csa_finish(mvm->csa_vif);
+ mvm->csa_vif = NULL;
+ }
+ }
+
return 0;
}
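Together with the channel_switch_beacon handler added further down in this patch, the beacon notification now drives the CSA countdown. The function below merely restates that interplay in one place for clarity; it is not new code, only a condensed view of the logic above.

/* Restated for clarity: the per-beacon CSA step performed above. */
static void iwl_mvm_csa_step(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *vif = mvm->csa_vif;	/* set by the CSA hook */

	if (!vif || !vif->csa_active)
		return;

	if (!ieee80211_csa_is_complete(vif)) {
		/* count still running: push an updated beacon template */
		iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
	} else {
		ieee80211_csa_finish(vif);	/* switch may proceed */
		mvm->csa_vif = NULL;
	}
}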
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 8735ef1f44ae..7215f5980186 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -295,7 +295,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
+ IWL_UCODE_API(mvm->fw->ucode_ver) >= 9 &&
+ !iwlwifi_mod_params.uapsd_disable) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -309,11 +311,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
-
- /* IBSS has bugs in older versions */
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -322,6 +321,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_CSA_FLOW)
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -365,14 +367,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- /* we create the 802.11 header and zero length SSID IE. */
- hw->wiphy->max_sched_scan_ie_len =
- SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- }
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +385,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#ifdef CONFIG_PM_SLEEP
- if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ if (iwl_mvm_is_d0i3_supported(mvm) &&
+ device_can_wakeup(mvm->trans->dev)) {
+ mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+ hw->wiphy->wowlan = &mvm->wowlan;
+ } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
mvm->trans->ops->d3_suspend &&
mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
@@ -540,13 +543,22 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
return -EACCES;
/* return from D0i3 before starting a new Tx aggregation */
- if (action == IEEE80211_AMPDU_TX_START) {
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
tx_agg_ref = true;
/*
- * wait synchronously until D0i3 exit to get the correct
- * sequence number for the tid
+ * for tx start, wait synchronously until D0i3 exit to
+ * get the correct sequence number for the tid.
+ * additionally, some other ampdu actions use direct
+ * target access, which is not handled automatically
+ * by the trans layer (unlike commands), so wait for
+ * d0i3 exit in these cases as well.
*/
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
@@ -554,6 +566,9 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
return -EIO;
}
+ break;
+ default:
+ break;
}
mutex_lock(&mvm->mutex);
@@ -758,7 +773,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.pwr_restriction = cpu_to_le16(tx_power),
};
- return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
sizeof(reduce_txpwr_cmd),
&reduce_txpwr_cmd);
}
@@ -817,18 +832,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
if (ret)
goto out_release;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
if (ret)
goto out_remove_mac;
- if (!mvm->bf_allowed_vif && false &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+ if (!mvm->bf_allowed_vif &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
mvm->bf_allowed_vif = mvmvif;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -969,7 +983,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@@ -1223,10 +1237,14 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
return 0;
+ /* bcast filtering isn't supported for P2P client */
+ if (vif->p2p)
+ return 0;
+
if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
return 0;
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
sizeof(cmd), &cmd);
}
#else
@@ -1253,7 +1271,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
@@ -1333,10 +1351,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
- ret = iwl_mvm_power_update_mac(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@@ -1347,16 +1365,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_CQM) {
- IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+ IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
- ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
- if (ret)
- IWL_ERR(mvm, "failed to update CQM thresholds\n");
+ if (mvmvif->bf_data.bf_enabled) {
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update CQM thresholds\n");
+ }
}
if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
iwl_mvm_configure_bcast_filter(mvm, vif);
}
}
@@ -1402,7 +1423,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_ibss_active = true;
/* power updated needs to be done before quotas */
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@@ -1410,7 +1431,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
- iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
@@ -1420,7 +1441,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
return 0;
out_quota_failed:
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@@ -1450,13 +1471,13 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
- iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
iwl_mvm_update_quotas(mvm, NULL);
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1477,7 +1498,7 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
BSS_CHANGED_BANDWIDTH) &&
- iwl_mvm_mac_ctxt_changed(mvm, vif))
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false))
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
@@ -1495,6 +1516,9 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+ iwl_mvm_sched_scan_stop(mvm, true);
+
switch (vif->type) {
case NL80211_IFTYPE_STATION:
iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
@@ -1525,7 +1549,7 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
switch (mvm->scan_status) {
case IWL_MVM_SCAN_SCHED:
- ret = iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_mvm_sched_scan_stop(mvm, true);
if (ret) {
ret = -EBUSY;
goto out;
@@ -1697,6 +1721,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_add_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
+ /*
+ * EBS may be disabled due to previous failures reported by FW.
+ * Reset EBS status here assuming environment has been changed.
+ */
+ mvm->last_ebs_successful = true;
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
@@ -1708,14 +1737,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
- if (vif->bss_conf.dtim_period)
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
- CMD_SYNC));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1772,7 +1799,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
int ret;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false);
mutex_unlock(&mvm->mutex);
return ret;
}
@@ -1865,7 +1892,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
int ret;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_mvm_sched_scan_stop(mvm, false);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
@@ -2161,10 +2188,10 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
return;
mutex_lock(&mvm->mutex);
+ iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
- iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -2184,6 +2211,11 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
+ /* Unless it's a CSA flow we have nothing to do here */
+ if (vif->csa_active) {
+ mvmvif->ap_ibss_active = true;
+ break;
+ }
case NL80211_IFTYPE_ADHOC:
/*
* The AP binding flow is handled as part of the start_ap flow
@@ -2207,7 +2239,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* Power state must be updated before quotas,
* otherwise fw will complain.
*/
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@@ -2220,11 +2252,17 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out_remove_binding;
}
+ /* Handle binding during CSA */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_update_quotas(mvm, vif);
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false);
+ }
+
goto out_unlock;
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_power_update_mac(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -2244,22 +2282,29 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
switch (vif->type) {
- case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
goto out_unlock;
case NL80211_IFTYPE_MONITOR:
mvmvif->monitor_active = false;
iwl_mvm_update_quotas(mvm, NULL);
break;
+ case NL80211_IFTYPE_AP:
+ /* This part is triggered only during CSA */
+ if (!vif->csa_active || !mvmvif->ap_ibss_active)
+ goto out_unlock;
+
+ mvmvif->ap_ibss_active = false;
+ iwl_mvm_update_quotas(mvm, NULL);
+ /* TODO: bt_coex notification here? */
default:
break;
}
iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
+ iwl_mvm_power_update_mac(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -2323,9 +2368,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif,
- CMD_SYNC);
- return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
+ return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
}
return -EOPNOTSUPP;
@@ -2346,6 +2390,53 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
+static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ if (WARN(mvm->csa_vif && mvm->csa_vif->csa_active,
+ "Another CSA is already in progress"))
+ goto out_unlock;
+
+ IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
+ chandef->center_freq1);
+ mvm->csa_vif = vif;
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 queues, bool drop)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+
+ if (!vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ mutex_lock(&mvm->mutex);
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+
+ if (WARN_ON_ONCE(!mvmsta))
+ goto done;
+
+ if (drop) {
+ if (iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, true))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ iwl_trans_wait_tx_queue_empty(mvm->trans,
+ mvmsta->tfd_queue_msk);
+ }
+done:
+ mutex_unlock(&mvm->mutex);
+}
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -2369,6 +2460,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .flush = iwl_mvm_mac_flush,
.sched_scan_start = iwl_mvm_mac_sched_scan_start,
.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
.set_key = iwl_mvm_mac_set_key,
@@ -2388,6 +2480,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
+ .channel_switch_beacon = iwl_mvm_channel_switch_beacon,
+
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index f1ec0986c3c9..fcc6c29482d0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
- MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
u32 tx_data_timeout;
bool skip_over_dtim;
u8 skip_dtim_periods;
- bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_USER,
IWL_MVM_REF_TX,
IWL_MVM_REF_TX_AGG,
+ IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_COUNT,
};
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
* @uploaded: indicates the MAC context has been added to the device
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
+ * @pm_enabled: indicates if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
* @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
+ bool pm_enabled;
bool monitor_active;
bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
int last_frame_idx;
};
+enum {
+ D0I3_DEFER_WAKEUP,
+ D0I3_PENDING_WAKEUP,
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -484,6 +490,7 @@ struct iwl_mvm {
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
+ struct iwl_sf_region sf_space;
u32 ampdu_ref;
@@ -495,6 +502,7 @@ struct iwl_mvm {
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+ const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
/* NVM sections */
struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
@@ -535,6 +543,8 @@ struct iwl_mvm {
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
+ bool last_ebs_successful;
+
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
@@ -578,8 +588,12 @@ struct iwl_mvm {
void *fw_error_dump;
void *fw_error_sram;
u32 fw_error_sram_len;
+ u32 *fw_error_rxf;
+ u32 fw_error_rxf_len;
+#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
+#endif
struct ieee80211_vif *p2p_device_vif;
@@ -601,6 +615,9 @@ struct iwl_mvm {
bool d0i3_offloading;
struct work_struct d0i3_exit_work;
struct sk_buff_head d0i3_tx;
+ /* protect d0i3_suspend_flags */
+ struct mutex d0i3_suspend_mutex;
+ unsigned long d0i3_suspend_flags;
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +646,8 @@ struct iwl_mvm {
/* Indicate if device power save is allowed */
bool ps_disabled;
- /* Indicate if device power management is allowed */
- bool pm_disabled;
+
+ struct ieee80211_vif *csa_vif;
};
/* Extract MVM priv from op_mode and _hw */
@@ -705,6 +722,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -745,7 +763,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_device_cmd *cmd);
/* NVM */
-int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_up(struct iwl_mvm *mvm);
@@ -796,7 +814,8 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
@@ -840,7 +859,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify);
int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@@ -874,10 +893,8 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
int rs_pretty_print_rate(char *buf, const u32 rate);
/* power management */
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-
int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
-int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
char *buf, int bufsz);
@@ -886,8 +903,18 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+#ifdef CONFIG_IWLWIFI_LEDS
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+#else
+static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+}
+#endif
/* D3 (WoWLAN, NetDetect) */
int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
@@ -922,9 +949,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -936,9 +963,10 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum ieee80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@@ -969,17 +997,11 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 flags);
-int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable);
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool force,
- u32 flags);
-
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index cf2d09f53782..808f78f6fbf9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -74,6 +74,12 @@
#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
+/* load nvm chunk response */
+enum {
+ READ_NVM_CHUNK_SUCCEED = 0,
+ READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
+};
+
/*
* prepare the NVM host command w/ the pointers to the nvm buffer
* and send it to fw
@@ -90,7 +96,7 @@ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
struct iwl_host_cmd cmd = {
.id = NVM_ACCESS_CMD,
.len = { sizeof(struct iwl_nvm_access_cmd), length },
- .flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
+ .flags = CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, data },
/* data may come from vmalloc, so use _DUP */
.dataflags = { 0, IWL_HCMD_DFL_DUP },
@@ -112,7 +118,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = NVM_ACCESS_CMD,
- .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, },
};
int ret, bytes_read, offset_read;
@@ -139,10 +145,26 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
offset_read = le16_to_cpu(nvm_resp->offset);
resp_data = nvm_resp->data;
if (ret) {
- IWL_ERR(mvm,
- "NVM access command failed with status %d (device: %s)\n",
- ret, mvm->cfg->name);
- ret = -EINVAL;
+ if ((offset != 0) &&
+ (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+ /*
+ * meaning of NOT_VALID_ADDRESS:
+ * the driver tried to read a chunk from an address that is a
+ * multiple of 2K and got an error because the address is empty.
+ * meaning of (offset != 0): the driver has already read valid
+ * data from another chunk, so this case is not an error.
+ */
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
+ offset);
+ ret = 0;
+ } else {
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed with status %d (device: %s)\n",
+ ret, mvm->cfg->name);
+ ret = -EIO;
+ }
goto exit;
}
@@ -211,9 +233,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
while (ret == length) {
ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
if (ret < 0) {
- IWL_ERR(mvm,
- "Cannot read NVM from section %d offset %d, length %d\n",
- section, offset, length);
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "Cannot read NVM from section %d offset %d, length %d\n",
+ section, offset, length);
return ret;
}
offset += ret;
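For clarity, the contract of the loop above, restated as a sketch: iwl_nvm_read_chunk() copies the bytes to the offset echoed back by the fw and returns how many it got, so a short or zero-length read (including the tolerated NOT_VALID_ADDRESS case, which now yields 0) cleanly ends the section. The chunk size parameter is an assumption of the sketch.

/* Sketch of the section-read loop above. */
static int read_whole_section(struct iwl_mvm *mvm, u16 section,
			      u8 *data, u16 chunk)
{
	u16 offset = 0;
	int ret = chunk;

	while (ret == chunk) {
		/* the chunk helper copies into data at the fw's offset */
		ret = iwl_nvm_read_chunk(mvm, section, offset, chunk, data);
		if (ret < 0)
			return ret;		/* real I/O failure */
		offset += ret;			/* ret < chunk ends the loop */
	}
	return offset;				/* total bytes in this section */
}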
@@ -238,13 +260,20 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
return NULL;
}
} else {
+ /* SW and REGULATORY sections are mandatory */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
!mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
IWL_ERR(mvm,
"Can't parse empty family 8000 NVM sections\n");
return NULL;
}
+ /* MAC_OVERRIDE or at least HW section must exist */
+ if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+ IWL_ERR(mvm,
+ "Can't parse mac_address, empty sections\n");
+ return NULL;
+ }
}
if (WARN_ON(!mvm->cfg))
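The checks above encode three rules for family 8000 parsing, condensed in the predicate below (a sketch only, using the same struct fields as above): SW and REGULATORY must exist, and the MAC address must be available from either the HW section or MAC_OVERRIDE.

/* Sketch: the family-8000 NVM validity rules enforced above. */
static bool iwl_nvm_sections_valid(struct iwl_mvm *mvm)
{
	struct iwl_nvm_section *s = mvm->nvm_sections;

	return s[NVM_SECTION_TYPE_SW].data &&
	       s[NVM_SECTION_TYPE_REGULATORY].data &&
	       (s[mvm->cfg->nvm_hw_section_num].data ||
		s[NVM_SECTION_TYPE_MAC_OVERRIDE].data);
}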
@@ -311,16 +340,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
* get here after that we assume the NVM request can be satisfied
* synchronously.
*/
- ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file,
+ ret = request_firmware(&fw_entry, mvm->nvm_file_name,
mvm->trans->dev);
if (ret) {
IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
- iwlwifi_mod_params.nvm_file, ret);
+ mvm->nvm_file_name, ret);
return ret;
}
IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
- iwlwifi_mod_params.nvm_file, fw_entry->size);
+ mvm->nvm_file_name, fw_entry->size);
if (fw_entry->size < sizeof(*file_sec)) {
IWL_ERR(mvm, "NVM file too small\n");
@@ -427,53 +456,28 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
return ret;
}
-int iwl_nvm_init(struct iwl_mvm *mvm)
+int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
- int ret, i, section;
+ int ret, section;
u8 *nvm_buffer, *temp;
- int nvm_to_read[NVM_MAX_NUM_SECTIONS];
- int num_of_sections_to_read;
if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
return -EINVAL;
- /* load external NVM if configured */
- if (iwlwifi_mod_params.nvm_file) {
- /* move to External NVM flow */
- ret = iwl_mvm_read_external_nvm(mvm);
- if (ret)
- return ret;
- } else {
- /* list of NVM sections we are allowed/need to read */
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
- nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
- nvm_to_read[1] = NVM_SECTION_TYPE_SW;
- nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
- nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
- num_of_sections_to_read = 4;
- } else {
- nvm_to_read[0] = NVM_SECTION_TYPE_SW;
- nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
- nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
- nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
- nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
- num_of_sections_to_read = 5;
- }
-
+ /* load NVM values from nic */
+ if (read_nvm_from_nic) {
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
- /* TODO: find correct NVM max size for a section */
nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
GFP_KERNEL);
if (!nvm_buffer)
return -ENOMEM;
- for (i = 0; i < num_of_sections_to_read; i++) {
- section = nvm_to_read[i];
+ for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
if (ret < 0)
- break;
+ continue;
temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
if (!temp) {
ret = -ENOMEM;
@@ -502,15 +506,21 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_hw_blob.size = ret;
break;
}
- WARN(1, "section: %d", section);
}
#endif
}
kfree(nvm_buffer);
- if (ret < 0)
+ }
+
+ /* load external NVM if configured */
+ if (mvm->nvm_file_name) {
+ /* move to External NVM flow */
+ ret = iwl_mvm_read_external_nvm(mvm);
+ if (ret)
return ret;
}
+ /* parse the relevant nvm sections */
mvm->nvm_data = iwl_parse_nvm_sections(mvm);
if (!mvm->nvm_data)
return -ENODATA;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 9545d7fdd4bf..cc2f7de396de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -79,8 +79,8 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
-#include "fw-error-dump.h"
#include "time-event.h"
+#include "iwl-fw-error-dump.h"
/*
* module name, copyright, version, etc.
@@ -220,7 +220,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
- RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
+ RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
iwl_mvm_rx_ant_coupling_notif, true),
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->sf_state = SF_UNINIT;
mutex_init(&mvm->mutex);
+ mutex_init(&mvm->d0i3_suspend_mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -465,13 +466,24 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
+ /* set the nvm_file_name according to priority */
+ if (iwlwifi_mod_params.nvm_file)
+ mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+ else
+ mvm->nvm_file_name = mvm->cfg->default_nvm_file;
+
+ if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
+ "not allowing power-up and not having nvm_file\n"))
+ goto out_free;
/*
- * If the NVM exists in an external file,
- * there is no need to unnecessarily power up the NIC at driver load
+ * Even if the NVM exists in nvm_file, the driver should read the NVM
+ * again from the NIC because there might be entries that exist in the
+ * OTP and not in the file.
+ * For NICs with no_power_up_nic_in_init: rely completely on nvm_file.
*/
- if (iwlwifi_mod_params.nvm_file) {
- err = iwl_nvm_init(mvm);
+ if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) {
+ err = iwl_nvm_init(mvm, false);
if (err)
goto out_free;
} else {
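The combination of nvm_file_name and no_power_up_nic_in_init yields four cases; the comment block below summarizes the behaviour the code above implements (a summary only, not code from the patch).

/*
 * nvm_file_name?  no_power_up_nic_in_init?  NVM source at driver load
 * --------------  ------------------------  -------------------------------
 * set             set                       file only, NIC left powered off
 * set             clear                     NIC first, file entries override
 * unset           set                       refused (WARN above, out_free)
 * unset           clear                     NIC only
 */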
@@ -518,7 +530,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
out_free:
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
- if (!iwlwifi_mod_params.nvm_file)
+ if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
iwl_trans_op_mode_leave(trans);
ieee80211_free_hw(mvm->hw);
return NULL;
@@ -538,6 +550,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->scan_cmd);
vfree(mvm->fw_error_dump);
kfree(mvm->fw_error_sram);
+ kfree(mvm->fw_error_rxf);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
@@ -814,6 +827,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
u32 file_len;
+ u32 trans_len;
lockdep_assert_held(&mvm->mutex);
@@ -821,8 +835,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
return;
file_len = mvm->fw_error_sram_len +
+ mvm->fw_error_rxf_len +
sizeof(*dump_file) +
- sizeof(*dump_data);
+ sizeof(*dump_data) * 2;
+
+ trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
+ if (trans_len)
+ file_len += trans_len;
dump_file = vmalloc(file_len);
if (!dump_file)
@@ -833,7 +852,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_file->file_len = cpu_to_le32(file_len);
dump_data = (void *)dump_file->data;
- dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+ dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
+ memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
+
+ dump_data = iwl_mvm_fw_error_next_data(dump_data);
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
/*
@@ -842,6 +866,23 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
* mvm->fw_error_sram right now.
*/
memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+
+ kfree(mvm->fw_error_rxf);
+ mvm->fw_error_rxf = NULL;
+ mvm->fw_error_rxf_len = 0;
+
+ kfree(mvm->fw_error_sram);
+ mvm->fw_error_sram = NULL;
+ mvm->fw_error_sram_len = 0;
+
+ if (trans_len) {
+ void *buf = iwl_mvm_fw_error_next_data(dump_data);
+ u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
+ trans_len);
+ dump_data = (void *)((u8 *)buf + real_trans_len);
+ dump_file->file_len =
+ cpu_to_le32(file_len - trans_len + real_trans_len);
+ }
}
#endif
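The file assembled above is a header followed by type/length-prefixed blocks (RXF, then SRAM, then optional transport data). A reader can walk it with the same stride helper the patch uses; the walker itself is a sketch, not part of the patch.

/* Sketch: iterate the blocks of the dump file assembled above. */
static void iwl_mvm_walk_error_dump(struct iwl_fw_error_dump_file *file)
{
	struct iwl_fw_error_dump_data *data = (void *)file->data;
	u32 left = le32_to_cpu(file->file_len) - sizeof(*file);

	while (left >= sizeof(*data)) {
		u32 len = le32_to_cpu(data->len);

		pr_info("dump block: type %u, %u bytes\n",
			le32_to_cpu(data->type), len);
		left -= sizeof(*data) + len;
		data = iwl_mvm_fw_error_next_data(data);
	}
}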
@@ -853,6 +894,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_fw_error_sram_dump(mvm);
+ iwl_mvm_fw_error_rxf_dump(mvm);
#endif
iwl_mvm_nic_restart(mvm);
@@ -1126,9 +1168,9 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
struct iwl_host_cmd get_status_cmd = {
.id = WOWLAN_GET_STATUSES,
- .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+ .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
};
- struct iwl_wowlan_status_v6 *status;
+ struct iwl_wowlan_status *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
@@ -1158,18 +1200,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
mutex_unlock(&mvm->mutex);
}
-static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
CMD_WAKE_UP_TRANS;
int ret;
IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+ mutex_lock(&mvm->d0i3_suspend_mutex);
+ if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+ IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+ __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+ return 0;
+ }
+ mutex_unlock(&mvm->d0i3_suspend_mutex);
+
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
if (ret)
goto out;
@@ -1183,6 +1234,25 @@ out:
return ret;
}
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+ return _iwl_mvm_exit_d0i3(mvm);
+}
+
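The DEFER/PENDING flag pair introduced in mvm.h is consumed on the resume path; the sketch below shows the intended counterpart. The function name is an assumption; only _iwl_mvm_exit_d0i3() is exported by this patch.

/* Sketch: replay a d0i3 exit that fired while we were suspended. */
static void iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
{
	bool do_exit;

	mutex_lock(&mvm->d0i3_suspend_mutex);
	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	do_exit = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
				       &mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);

	if (do_exit)
		_iwl_mvm_exit_d0i3(mvm);
}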
+static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct net_device *napi_dev,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -1196,4 +1266,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_config = iwl_mvm_nic_config,
.enter_d0i3 = iwl_mvm_enter_d0i3,
.exit_d0i3 = iwl_mvm_exit_d0i3,
+ .napi_add = iwl_mvm_napi_add,
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index 237efe0ac1c4..539f3a942d43 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -156,6 +156,18 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
idle_cnt = chains_static;
active_cnt = chains_dynamic;
+ /* In scenarios where we only ever use single-stream rates,
+ * i.e. legacy 11b/g/a associations, single-stream APs or even
+ * static SMPS, enable both chains to get diversity, improving
+ * the case where we're far enough from the AP that attenuation
+ * between the two antennas is sufficiently different to impact
+ * performance.
+ */
+ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+ idle_cnt = 2;
+ active_cnt = 2;
+ }
+
cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
@@ -187,7 +199,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
chains_static, chains_dynamic);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
sizeof(struct iwl_phy_context_cmd),
&cmd);
if (ret)
@@ -202,18 +214,15 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
- int ret;
-
WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
ctxt->ref);
lockdep_assert_held(&mvm->mutex);
ctxt->channel = chandef->chan;
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
- chains_static, chains_dynamic,
- FW_CTXT_ACTION_ADD, 0);
- return ret;
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD, 0);
}
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 6b636eab3339..c182a8baf685 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -123,28 +123,6 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
}
-int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- .ba_enable_beacon_abort = cpu_to_le32(enable),
- };
-
- if (!mvmvif->bf_data.bf_enabled)
- return 0;
-
- if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
- cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-
- mvmvif->bf_data.ba_enabled = enable;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
-}
-
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd *cmd)
{
@@ -268,6 +246,57 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ unsigned long *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!mvmvif->phy_ctxt)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_AP)
+ __set_bit(mvmvif->phy_ctxt->id, data);
+}
+
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned long phy_ctxt_counter = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_binding_iterator,
+ &phy_ctxt_counter);
+
+ if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+ ETH_ALEN))
+ return false;
+
+ if (vif->p2p &&
+ !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+ return false;
+ /*
+ * Avoid using uAPSD if the P2P client is associated to a GO
+ * that uses opportunistic power save. This is due to a current
+ * FW limitation.
+ */
+ if (vif->p2p &&
+ (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return false;
+
+ /*
+ * Avoid using uAPSD if the client is in DCM -
+ * it causes latency issues with Miracast
+ */
+ if (hweight8(phy_ctxt_counter) >= 2)
+ return false;
+
+ return true;
+}
+
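The binding iterator above marks each bound PHY context id as a bit, so vifs on different channels show up as distinct bits and the DCM test reduces to a popcount. Restated below for clarity; this is not new logic.

/* Restated: two distinct PHY contexts in use means DCM, so no uAPSD. */
static bool iwl_mvm_in_dcm(unsigned long phy_ctxt_counter)
{
	return hweight8(phy_ctxt_counter) >= 2;
}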
static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
@@ -280,7 +309,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
- bool allow_uapsd = true;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@@ -303,13 +331,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
- mvm->pm_disabled)
+ !mvmvif->pm_enabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +374,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
}
- if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
- ETH_ALEN))
- allow_uapsd = false;
-
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
- allow_uapsd = false;
- /*
- * Avoid using uAPSD if P2P client is associated to GO that uses
- * opportunistic power save. This is due to current FW limitation.
- */
- if (vif->p2p &&
- vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
- IEEE80211_P2P_OPPPS_ENABLE_BIT)
- allow_uapsd = false;
-
- if (allow_uapsd)
+ if (iwl_mvm_power_allow_uapsd(mvm, vif))
iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,20 +428,13 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
{
struct iwl_mac_power_cmd cmd = {};
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
-
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
- return 0;
-
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
- return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
sizeof(cmd), &cmd);
}
@@ -444,12 +444,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
- return 0;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
- return 0;
-
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
mvm->ps_disabled = true;
@@ -466,7 +460,7 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
"Sending device power command with flags = 0x%X\n",
cmd.flags);
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
&cmd);
}
@@ -508,86 +502,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-struct iwl_power_constraint {
+struct iwl_power_vifs {
struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
struct ieee80211_vif *p2p_vif;
- u16 bss_phyctx_id;
- u16 p2p_phyctx_id;
- bool pm_disabled;
- bool ps_disabled;
- struct iwl_mvm *mvm;
+ struct ieee80211_vif *ap_vif;
+ struct ieee80211_vif *monitor_vif;
+ bool p2p_active;
+ bool bss_active;
+ bool ap_active;
+ bool monitor_active;
};
static void iwl_mvm_power_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_power_constraint *power_iterator = _data;
- struct iwl_mvm *mvm = power_iterator->mvm;
+ struct iwl_power_vifs *power_iterator = _data;
+ mvmvif->pm_enabled = false;
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP:
- /* no BSS power mgmt if we have an active AP */
- if (mvmvif->ap_ibss_active)
- power_iterator->pm_disabled = true;
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->ap_vif);
+ power_iterator->ap_vif = vif;
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->ap_active = true;
break;
case NL80211_IFTYPE_MONITOR:
- /* no BSS power mgmt and no device power save */
- power_iterator->pm_disabled = true;
- power_iterator->ps_disabled = true;
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->monitor_vif);
+ power_iterator->monitor_vif = vif;
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->monitor_active = true;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- if (mvmvif->phy_ctxt)
- power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
-
- /* we should have only one P2P vif */
+ /* only a single MAC of the same type */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
-
- IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
- power_iterator->p2p_phyctx_id,
- power_iterator->bss_phyctx_id);
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
- /* no BSS power mgmt if we have a P2P client*/
- power_iterator->pm_disabled = true;
- } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
- power_iterator->bss_phyctx_id < MAX_PHYS &&
- power_iterator->p2p_phyctx_id ==
- power_iterator->bss_phyctx_id) {
- power_iterator->pm_disabled = true;
- }
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->p2p_active = true;
break;
case NL80211_IFTYPE_STATION:
- if (mvmvif->phy_ctxt)
- power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
-
- /* we should have only one BSS vif */
+ /* only a single MAC of the same type */
WARN_ON(power_iterator->bss_vif);
power_iterator->bss_vif = vif;
+ if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+ power_iterator->bss_active = true;
if (mvmvif->bf_data.bf_enabled &&
!WARN_ON(power_iterator->bf_vif))
power_iterator->bf_vif = vif;
- IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
- power_iterator->p2p_phyctx_id,
- power_iterator->bss_phyctx_id);
- if (mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
- (power_iterator->p2p_phyctx_id < MAX_PHYS &&
- power_iterator->bss_phyctx_id < MAX_PHYS &&
- power_iterator->p2p_phyctx_id ==
- power_iterator->bss_phyctx_id))
- power_iterator->pm_disabled = true;
break;
default:
@@ -596,70 +573,73 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
}
static void
-iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
- struct iwl_power_constraint *constraint)
+iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+ struct iwl_power_vifs *vifs)
{
- lockdep_assert_held(&mvm->mutex);
+ struct iwl_mvm_vif *bss_mvmvif = NULL;
+ struct iwl_mvm_vif *p2p_mvmvif = NULL;
+ struct iwl_mvm_vif *ap_mvmvif = NULL;
+ bool client_same_channel = false;
+ bool ap_same_channel = false;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
- constraint->pm_disabled = true;
- constraint->ps_disabled = true;
- }
+ lockdep_assert_held(&mvm->mutex);
+ /* get vifs info + set pm_enabled to false */
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_iterator, constraint);
-}
-
-int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_power_constraint constraint = {
- .p2p_phyctx_id = MAX_PHYS,
- .bss_phyctx_id = MAX_PHYS,
- .mvm = mvm,
- };
- bool ba_enable;
- int ret;
+ iwl_mvm_power_iterator, vifs);
- lockdep_assert_held(&mvm->mutex);
+ if (vifs->bss_vif)
+ bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
- return 0;
+ if (vifs->p2p_vif)
+ p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
- iwl_mvm_power_get_global_constraint(mvm, &constraint);
- mvm->ps_disabled = constraint.ps_disabled;
- mvm->pm_disabled = constraint.pm_disabled;
+ if (vifs->ap_vif)
+ ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
- /* don't update device power state unless we add / remove monitor */
- if (vif->type == NL80211_IFTYPE_MONITOR) {
- ret = iwl_mvm_power_update_device(mvm);
- if (ret)
- return ret;
+ /* enable PM on bss if bss is standalone */
+ if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+ bss_mvmvif->pm_enabled = true;
+ return;
}
- if (constraint.bss_vif) {
- ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
- if (ret)
- return ret;
+ /* enable PM on p2p if p2p is standalone */
+ if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+ p2p_mvmvif->pm_enabled = true;
+ return;
}
- if (constraint.p2p_vif) {
- ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
- if (ret)
- return ret;
+ if (vifs->bss_active && vifs->p2p_active)
+ client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ p2p_mvmvif->phy_ctxt->id);
+ if (vifs->bss_active && vifs->ap_active)
+ ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+ ap_mvmvif->phy_ctxt->id);
+
+ /* clients are not standalone: enable PM if DCM */
+ if (!(client_same_channel || ap_same_channel) &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+ if (vifs->bss_active)
+ bss_mvmvif->pm_enabled = true;
+ if (vifs->p2p_active &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM))
+ p2p_mvmvif->pm_enabled = true;
+ return;
}
- if (!constraint.bf_vif)
- return 0;
-
- vif = constraint.bf_vif;
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
- !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
-
- return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
+ /*
+ * There is only one channel in the system and there are only
+ * bss and p2p clients that share it
+ */
+ if (client_same_channel && !vifs->ap_active &&
+ (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+ /* share the same channel */
+ bss_mvmvif->pm_enabled = true;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+ p2p_mvmvif->pm_enabled = true;
+ }
}
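To summarize the policy implemented above (a sketch; the firmware TLV capability checks on the P2P and DCM/SCM paths are elided):

/*
 * active vifs                              BSS PM   P2P PM
 * BSS client alone                         on       -
 * P2P client alone                         -        on
 * BSS + P2P client, different channels     on       on    (DCM)
 * BSS + P2P client, same channel, no AP    on       on    (SCM)
 * client sharing a channel with an AP/GO   off      off
 */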
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -671,19 +651,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- if (WARN_ON(!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
- return 0;
-
mutex_lock(&mvm->mutex);
memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
mutex_unlock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -790,7 +761,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (mvmvif != mvm->bf_allowed_vif ||
+ if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
@@ -818,6 +789,26 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
}
+static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ if (!mvmvif->bf_data.bf_enabled)
+ return 0;
+
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ mvmvif->bf_data.ba_enabled = enable;
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
+}
+
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 flags)
@@ -826,8 +817,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
- vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -838,6 +828,55 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_power_vifs vifs = {};
+ bool ba_enable;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_power_set_pm(mvm, &vifs);
+
+ /* disable PS if CAM */
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ mvm->ps_disabled = true;
+ } else {
+ /* don't update device power state unless we add / remove monitor */
+ if (vifs.monitor_vif) {
+ if (vifs.monitor_active)
+ mvm->ps_disabled = true;
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (vifs.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (vifs.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (!vifs.bf_vif)
+ return 0;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif);
+
+ ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
+ !vifs.bf_vif->bss_conf.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
+
+ return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
+}
+
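Expanding the negations, the beacon-abort condition computed above reduces to this (sketch, same variables as in the function):

/* abort on beacons only while every power-saving layer is active */
ba_enable = mvmvif->pm_enabled && !mvm->ps_disabled &&
	    vifs.bf_vif->bss_conf.ps && !iwl_mvm_vif_low_latency(mvmvif);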
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool enable, u32 flags)
@@ -861,9 +900,10 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
if (WARN_ON(!dtimper_msec))
return 0;
- cmd.flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
cmd.skip_dtim_periods = 300 / dtimper_msec;
+ if (cmd.skip_dtim_periods)
+ cmd.flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -894,33 +934,3 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
return ret;
}
-
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool force,
- u32 flags)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif != mvm->bf_allowed_vif)
- return 0;
-
- if (!mvmvif->bf_data.bf_enabled) {
- /* disable beacon filtering explicitly if force is true */
- if (force)
- return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
- return 0;
- }
-
- return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
-}
-
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
-{
- struct iwl_powertable_cmd cmd = {
- .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
- };
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
-}
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 35e86e06dffd..ba68d7b84505 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -285,7 +285,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index e1c838899363..306a6caa4868 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -211,7 +211,7 @@ static const struct rs_tx_column rs_tx_columns[] = {
.next_columns = {
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
@@ -223,8 +223,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_B,
.next_columns = {
RS_COLUMN_LEGACY_ANT_A,
- RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
+ RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
@@ -238,10 +238,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -254,10 +254,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -271,10 +271,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_MIMO2,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -289,10 +289,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_MIMO2,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_siso_allow,
@@ -304,12 +304,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_AB,
.next_columns = {
RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
- RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -321,12 +321,12 @@ static const struct rs_tx_column rs_tx_columns[] = {
.sgi = true,
.next_columns = {
RS_COLUMN_SISO_ANT_A_SGI,
- RS_COLUMN_SISO_ANT_B_SGI,
- RS_COLUMN_SISO_ANT_A,
- RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_LEGACY_ANT_B,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
+ RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&tbl->win[i]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+ rs_rate_scale_clear_window(&tbl->tpc_win[i]);
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
+static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes,
+ u8 reduced_txp)
{
struct iwl_rate_scale_data *window = NULL;
+ int ret;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
+ if (tbl->column != RS_COLUMN_INVALID) {
+ lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+ lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+ }
+
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
+ ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ window);
+ if (ret)
+ return ret;
+
+ if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ return -EINVAL;
+
+ window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
window);
}
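In effect each tx outcome is now accounted in three places (sketch):

/*
 * tbl->win[rate]              - per-rate success history (as before)
 * tbl->tpc_win[reduced_txp]   - per-power-reduction history, drives TPC
 * lq_sta->tx_stats[col][rate] - raw counters, exported via debugfs
 */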
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
u32 ucode_rate;
struct rs_rate rate;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
ucode_rate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- rs_collect_tx_data(curr_tbl, rate.index,
+ rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
info->status.ampdu_len,
- info->status.ampdu_ack_len);
+ info->status.ampdu_ack_len,
+ reduced_txp);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
else
continue;
- rs_collect_tx_data(tmp_tbl, rate.index, 1,
- i < retries ? 0 : legacy_success);
+ rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
}
/* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
lq_sta->last_rate_n_flags = ucode_rate;
+ IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
@@ -1311,105 +1335,50 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
}
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput. When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
static s32 rs_get_best_rate(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl, /* "search" */
- u16 rate_mask, s8 index)
+ unsigned long rate_mask, s8 index)
{
- /* "active" values */
struct iwl_scale_tbl_info *active_tbl =
&(lq_sta->lq_info[lq_sta->active_tbl]);
- s32 active_sr = active_tbl->win[index].success_ratio;
- s32 active_tpt = active_tbl->expected_tpt[index];
- /* expected "search" throughput */
+ s32 success_ratio = active_tbl->win[index].success_ratio;
+ u16 expected_current_tpt = active_tbl->expected_tpt[index];
const u16 *tpt_tbl = tbl->expected_tpt;
-
- s32 new_rate, high, low, start_hi;
u16 high_low;
- s8 rate = index;
-
- new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
- while (1) {
- high_low = rs_get_adjacent_rate(mvm, rate, rate_mask,
- tbl->rate.type);
-
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
+ u32 target_tpt;
+ int rate_idx;
- /*
- * Lower the "search" bit rate, to give new "search" mode
- * approximately the same throughput as "active" if:
- *
- * 1) "Active" mode has been working modestly well (but not
- * great), and expected "search" throughput (under perfect
- * conditions) at candidate rate is above the actual
- * measured "active" throughput (but less than expected
- * "active" throughput under perfect conditions).
- * OR
- * 2) "Active" mode has been working perfectly or very well
- * and expected "search" throughput (under perfect
- * conditions) at candidate rate is above expected
- * "active" throughput (under perfect conditions).
- */
- if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > RS_SR_FORCE_DECREASE) &&
- (active_sr <= IWL_RATE_HIGH_TH) &&
- (tpt_tbl[rate] <= active_tpt))) ||
- ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
- (tpt_tbl[rate] > active_tpt))) {
- /* (2nd or later pass)
- * If we've already tried to raise the rate, and are
- * now trying to lower it, use the higher rate. */
- if (start_hi != IWL_RATE_INVALID) {
- new_rate = start_hi;
- break;
- }
-
- new_rate = rate;
+ if (success_ratio > RS_SR_NO_DECREASE) {
+ target_tpt = 100 * expected_current_tpt;
+ IWL_DEBUG_RATE(mvm,
+ "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
+ success_ratio, target_tpt);
+ } else {
+ target_tpt = lq_sta->last_tpt;
+ IWL_DEBUG_RATE(mvm,
+ "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
+ success_ratio, target_tpt);
+ }
- /* Loop again with lower rate */
- if (low != IWL_RATE_INVALID)
- rate = low;
+ rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
- /* Lower rate not available, use the original */
- else
- break;
-
- /* Else try to raise the "search" rate to match "active" */
- } else {
- /* (2nd or later pass)
- * If we've already tried to lower the rate, and are
- * now trying to raise it, use the lower rate. */
- if (new_rate != IWL_RATE_INVALID)
- break;
+ while (rate_idx != IWL_RATE_INVALID) {
+ if (target_tpt < (100 * tpt_tbl[rate_idx]))
+ break;
- /* Loop again with higher rate */
- else if (high != IWL_RATE_INVALID) {
- start_hi = high;
- rate = high;
+ high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
+ tbl->rate.type);
- /* Higher rate not available, use the original */
- } else {
- new_rate = rate;
- break;
- }
- }
+ rate_idx = (high_low >> 8) & 0xff;
}
- return new_rate;
+ IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
+ rate_idx, target_tpt,
+ rate_idx != IWL_RATE_INVALID ?
+ 100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
+
+ return rate_idx;
}
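A worked example of the simplified search, with made-up throughput numbers:

/*
 * Say the search column's expected_tpt is {10, 20, 40, 60, ...} and the
 * measured last_tpt is 3500 (targets compare against 100 * tpt_tbl[idx]):
 *
 *   idx 0: 100 * 10 = 1000 <= 3500, climb via rs_get_adjacent_rate()
 *   idx 1: 100 * 20 = 2000 <= 3500, climb
 *   idx 2: 100 * 40 = 4000 >  3500, pick idx 2
 *
 * i.e. the lowest rate whose perfect-conditions throughput beats the
 * currently measured one, replacing the old bidirectional walk.
 */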
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
@@ -1584,7 +1553,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
tpt = lq_sta->last_tpt / 100;
expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
- tbl->rate.bw);
+ rs_bw_from_sta_bw(sta));
if (WARN_ON_ONCE(!expected_tpt_tbl))
continue;
@@ -1625,7 +1594,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u16 rate_mask = 0;
+ unsigned long rate_mask = 0;
u32 rate_idx = 0;
memcpy(search_tbl, tbl, sz);
@@ -1667,7 +1636,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
!(BIT(rate_idx) & rate_mask)) {
IWL_DEBUG_RATE(mvm,
"can not switch with index %d"
- " rate mask %x\n",
+ " rate mask %lx\n",
rate_idx, rate_mask);
goto err;
@@ -1769,6 +1738,203 @@ out:
return action;
}
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+ int *weaker, int *stronger)
+{
+ *weaker = index + TPC_TX_POWER_STEP;
+ if (*weaker > TPC_MAX_REDUCTION)
+ *weaker = TPC_INVALID;
+
+ *stronger = index - TPC_TX_POWER_STEP;
+ if (*stronger < 0)
+ *stronger = TPC_INVALID;
+}
+
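A quick usage sketch: with TPC_TX_POWER_STEP 3 and TPC_MAX_REDUCTION 15, the reachable reduction indices form the ladder 0-3-6-9-12-15:

int weak, strong;

rs_get_adjacent_txp(mvm, 6, &weak, &strong);
/* weak == 9 (one step more reduction), strong == 3 (one step less) */

rs_get_adjacent_txp(mvm, 0, &weak, &strong);
/* weak == 3, strong == TPC_INVALID: already at full power */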
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct rs_rate *rate, enum ieee80211_band band)
+{
+ int index = rate->index;
+ bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+ bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+ !vif->bss_conf.ps);
+
+ IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
+ cam, sta_ps_disabled);
+ /*
+ * Allow TPC only if power management is enabled, or if the BT coex
+ * activity grade allows it and we are on 2.4GHz.
+ */
+ if ((cam || sta_ps_disabled) &&
+ !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+ return false;
+
+ IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+ if (is_legacy(rate))
+ return index == IWL_RATE_54M_INDEX;
+ if (is_ht(rate))
+ return index == IWL_RATE_MCS_7_INDEX;
+ if (is_vht(rate))
+ return index == IWL_RATE_MCS_7_INDEX ||
+ index == IWL_RATE_MCS_8_INDEX ||
+ index == IWL_RATE_MCS_9_INDEX;
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+enum tpc_action {
+ TPC_ACTION_STAY,
+ TPC_ACTION_DECREASE,
+ TPC_ACTION_INCREASE,
+ TPC_ACTION_NO_RESTRICTION,
+};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+ s32 sr, int weak, int strong,
+ int current_tpt,
+ int weak_tpt, int strong_tpt)
+{
+ /* stay until we have a valid tpt */
+ if (current_tpt == IWL_INVALID_VALUE) {
+ IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+ return TPC_ACTION_STAY;
+ }
+
+ /* Too many failures, increase txp */
+ if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+ IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+ return TPC_ACTION_NO_RESTRICTION;
+ }
+
+ /* try decreasing first if applicable */
+ if (weak != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ current_tpt >= strong_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "no weak txp measurement. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+
+ if (weak_tpt > current_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has better tpt. decrease txp\n");
+ return TPC_ACTION_DECREASE;
+ }
+ }
+
+ /* next, increase if needed */
+ if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+ if (weak_tpt == IWL_INVALID_VALUE &&
+ strong_tpt != IWL_INVALID_VALUE &&
+ current_tpt < strong_tpt) {
+ IWL_DEBUG_RATE(mvm,
+ "higher txp has better tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+
+ if (weak_tpt < current_tpt &&
+ (strong_tpt == IWL_INVALID_VALUE ||
+ strong_tpt > current_tpt)) {
+ IWL_DEBUG_RATE(mvm,
+ "lower txp has worse tpt. increase txp\n");
+ return TPC_ACTION_INCREASE;
+ }
+ }
+
+ IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+ return TPC_ACTION_STAY;
+}
+
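A worked example with invented numbers: SR = 11000 (~86%), current_tpt = 50, weak_tpt = 55, strong_tpt = 48, both neighbours valid:

/*
 * - current_tpt is valid and SR > TPC_SR_FORCE_INCREASE: no forced boost;
 * - weak_tpt (55) > current_tpt (50): reducing power further did not hurt
 *   throughput, so the function returns TPC_ACTION_DECREASE.
 */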
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ struct ieee80211_vif *vif = mvm_sta->vif;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum ieee80211_band band;
+ struct iwl_rate_scale_data *window;
+ struct rs_rate *rate = &tbl->rate;
+ enum tpc_action action;
+ s32 sr;
+ u8 cur = lq_sta->lq.reduced_tpc;
+ int current_tpt;
+ int weak, strong;
+ int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+ IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
+ lq_sta->dbg_fixed_txp_reduction);
+ lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+ return cur != lq_sta->dbg_fixed_txp_reduction;
+ }
+#endif
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (WARN_ON(!chanctx_conf))
+ band = IEEE80211_NUM_BANDS;
+ else
+ band = chanctx_conf->def.chan->band;
+ rcu_read_unlock();
+
+ if (!rs_tpc_allowed(mvm, vif, rate, band)) {
+ IWL_DEBUG_RATE(mvm,
+ "tpc is not allowed. remove txp restrictions\n");
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return cur != TPC_NO_REDUCTION;
+ }
+
+ rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+ /* Collect measured throughputs for current and adjacent rates */
+ window = tbl->tpc_win;
+ sr = window[cur].success_ratio;
+ current_tpt = window[cur].average_tpt;
+ if (weak != TPC_INVALID)
+ weak_tpt = window[weak].average_tpt;
+ if (strong != TPC_INVALID)
+ strong_tpt = window[strong].average_tpt;
+
+ IWL_DEBUG_RATE(mvm,
+ "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+ cur, current_tpt, sr, weak, strong,
+ weak_tpt, strong_tpt);
+
+ action = rs_get_tpc_action(mvm, sr, weak, strong,
+ current_tpt, weak_tpt, strong_tpt);
+
+ /* override actions if we are on the edge */
+ if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+ IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
+ action = TPC_ACTION_STAY;
+ } else if (strong == TPC_INVALID &&
+ (action == TPC_ACTION_INCREASE ||
+ action == TPC_ACTION_NO_RESTRICTION)) {
+ IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
+ action = TPC_ACTION_STAY;
+ }
+
+ switch (action) {
+ case TPC_ACTION_DECREASE:
+ lq_sta->lq.reduced_tpc = weak;
+ return true;
+ case TPC_ACTION_INCREASE:
+ lq_sta->lq.reduced_tpc = strong;
+ return true;
+ case TPC_ACTION_NO_RESTRICTION:
+ lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ return true;
+ case TPC_ACTION_STAY:
+ /* do nothing */
+ break;
+ }
+ return false;
+}
+
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -2019,6 +2185,9 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
break;
case RS_ACTION_STAY:
/* No change */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
+ update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+ break;
default:
break;
}
@@ -2271,10 +2440,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
if (i == IWL_RATE_9M_INDEX)
continue;
- /* Disable MCS9 as a workaround */
- if (i == IWL_RATE_MCS_9_INDEX)
- continue;
-
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2293,10 +2458,6 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
if (i == IWL_RATE_9M_INDEX)
continue;
- /* Disable MCS9 as a workaround */
- if (i == IWL_RATE_MCS_9_INDEX)
- continue;
-
/* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */
if (i == IWL_RATE_MCS_9_INDEX &&
sta->bandwidth == IEEE80211_STA_RX_BW_20)
@@ -2478,6 +2639,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->is_agg = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
+ lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2815,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table_from_fixed(mvm, lq_cmd,
lq_sta->band,
lq_sta->dbg_fixed_rate);
+ lq_cmd->reduced_tpc = 0;
ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
RATE_MCS_ANT_POS;
} else
@@ -2783,7 +2946,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
size_t buf_size;
u32 parsed_rate;
-
mvm = lq_sta->drv;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
@@ -2856,6 +3018,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->lq.agg_disable_start_th,
lq_sta->lq.agg_frame_cnt_limit);
+ desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
desc += sprintf(buff+desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3091,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek,
};
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static const char * const column_name[] = {
+ [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+ [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+ [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+ [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+ [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+ [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+ [RS_COLUMN_MIMO2] = "MIMO2",
+ [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+ };
+
+ static const char * const rate_name[] = {
+ [IWL_RATE_1M_INDEX] = "1M",
+ [IWL_RATE_2M_INDEX] = "2M",
+ [IWL_RATE_5M_INDEX] = "5.5M",
+ [IWL_RATE_11M_INDEX] = "11M",
+ [IWL_RATE_6M_INDEX] = "6M|MCS0",
+ [IWL_RATE_9M_INDEX] = "9M",
+ [IWL_RATE_12M_INDEX] = "12M|MCS1",
+ [IWL_RATE_18M_INDEX] = "18M|MCS2",
+ [IWL_RATE_24M_INDEX] = "24M|MCS3",
+ [IWL_RATE_36M_INDEX] = "36M|MCS4",
+ [IWL_RATE_48M_INDEX] = "48M|MCS5",
+ [IWL_RATE_54M_INDEX] = "54M|MCS6",
+ [IWL_RATE_MCS_7_INDEX] = "MCS7",
+ [IWL_RATE_MCS_8_INDEX] = "MCS8",
+ [IWL_RATE_MCS_9_INDEX] = "MCS9",
+ };
+
+ char *buff, *pos, *endpos;
+ int col, rate;
+ ssize_t ret;
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct rs_rate_stats *stats;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos, "COLUMN,");
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+ pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+ pos += scnprintf(pos, endpos - pos, "\n");
+
+ for (col = 0; col < RS_COLUMN_COUNT; col++) {
+ pos += scnprintf(pos, endpos - pos,
+ "%s,", column_name[col]);
+
+ for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+ stats = &(lq_sta->tx_stats[col][rate]);
+ pos += scnprintf(pos, endpos - pos,
+ "%llu/%llu,",
+ stats->success,
+ stats->total);
+ }
+ pos += scnprintf(pos, endpos - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+ return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+
+ return count;
+}
+
+static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+ .read = rs_sta_dbgfs_drv_tx_stats_read,
+ .write = rs_sta_dbgfs_drv_tx_stats_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
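The read handler emits CSV, one row per rs column; a hypothetical (abridged, values invented) dump might look like:

COLUMN,1M,2M,5.5M,11M,6M|MCS0,9M,12M|MCS1,18M|MCS2,
LEGACY_ANT_A,3/17,0/0,0/0,0/0,41/52,0/0,38/40,
SISO_ANT_A,0/0,0/0,0/0,0/0,112/130,0/0,96/101,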
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -2937,9 +3188,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+ debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
+ lq_sta->rs_sta_dbgfs_reduced_txp_file =
+ debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->dbg_fixed_txp_reduction);
}
static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3204,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
}
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 0acfac96a56c..374a83d7db25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -158,6 +158,13 @@ enum {
#define RS_SR_FORCE_DECREASE 1920 /* 15% */
#define RS_SR_NO_DECREASE 10880 /* 85% */
+#define TPC_SR_FORCE_INCREASE 9600 /* 75% */
+#define TPC_SR_NO_INCREASE 10880 /* 85% */
+#define TPC_TX_POWER_STEP 3
+#define TPC_MAX_REDUCTION 15
+#define TPC_NO_REDUCTION 0
+#define TPC_INVALID 0xff
+
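For reference, rs stores success ratios scaled by 128 (12800 = 100%, consistent with RS_SR_NO_DECREASE above), which is how the new thresholds decode (sketch):

/* TPC_SR_FORCE_INCREASE:  9600 * 100 / 12800 = 75% */
/* TPC_SR_NO_INCREASE:    10880 * 100 / 12800 = 85% */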
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -266,9 +273,16 @@ enum rs_column {
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+ RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
RS_COLUMN_INVALID,
};
+/* Packet stats per rate */
+struct rs_rate_stats {
+ u64 success;
+ u64 total;
+};
+
/**
* struct iwl_scale_tbl_info -- tx params and success history for all rates
*
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
enum rs_column column;
const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+ /* per txpower-reduction history */
+ struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
};
enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
bool is_vht;
enum ieee80211_band band;
+ struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
unsigned long active_legacy_rate;
unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ struct dentry *rs_sta_dbgfs_reduced_txp_file;
u32 dbg_fixed_rate;
+ u8 dbg_fixed_txp_reduction;
#endif
struct iwl_mvm *drv;
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
+
+ /* tx power reduction for this sta */
+ int tpc_reduce;
};
/* Initialize station's rate scaling information after adding station */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 6061553a5e44..cf7276967acd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -60,7 +60,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "iwl-trans.h"
-
#include "mvm.h"
#include "fw-api.h"
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_ni(mvm->hw, skb);
-}
-
-static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
- struct iwl_rx_phy_info *phy_info,
- struct ieee80211_rx_status *rx_status)
-{
- int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- u32 agc_a, agc_b;
- u32 val;
-
- val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
- agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
- agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
-
- val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
- rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
- rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-
- /*
- * dBm = rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal.
- */
- rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
- rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
- max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
-
- IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
- rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
-
- rx_status->signal = max_rssi_dbm;
- rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
- rx_status->chain_signal[0] = rssi_a_dbm;
- rx_status->chain_signal[1] = rssi_b_dbm;
+ ieee80211_rx(mvm->hw, skb);
}
/*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
*/
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
- iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
- else
- iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
+ iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status.flag |= RX_FLAG_VHT;
rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status.vht_flag |= RX_VHT_FLAG_BF;
} else {
rx_status.rate_idx =
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c28de54c75d4..4b6c7d4bd199 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -306,7 +306,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.id = SCAN_REQUEST_CMD,
.len = { 0, },
.data = { mvm->scan_cmd, },
- .flags = CMD_SYNC,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
@@ -319,7 +318,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
- BUG_ON(mvm->scan_cmd == NULL);
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(mvm->scan_cmd == NULL))
+ return -ENOMEM;
IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -514,7 +516,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ARRAY_SIZE(scan_abort_notif),
iwl_mvm_scan_abort_notif, NULL);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, 0, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
/* mac80211's state will be cleaned in the nic_restart flow */
@@ -538,9 +540,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ IWL_DEBUG_SCAN(mvm,
+ "Scheduled scan completed, status %s EBS status %s:%d\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted");
+ "completed" : "aborted", scan_notif->ebs_status ==
+ IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+ scan_notif->ebs_status);
+
/* only call mac80211 completion if the stop was initiated by FW */
if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -548,6 +554,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
ieee80211_sched_scan_stopped(mvm->hw);
}
+ mvm->last_ebs_successful = !scan_notif->ebs_status;
+
return 0;
}
@@ -740,7 +748,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct iwl_scan_offload_cfg *scan_cfg;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_CONFIG_CMD,
- .flags = CMD_SYNC,
};
struct iwl_mvm_scan_params params = {};
@@ -798,7 +805,6 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct iwl_scan_offload_blacklist *blacklist;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
- .flags = CMD_SYNC,
.len[1] = sizeof(*profile_cfg),
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
@@ -884,7 +890,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
}
- return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+ if (mvm->last_ebs_successful &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+ scan_req.flags |=
+ cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+
+ return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, 0,
sizeof(scan_req), &scan_req);
}
@@ -893,7 +904,6 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
int ret;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_ABORT_CMD,
- .flags = CMD_SYNC,
};
u32 status;
@@ -922,7 +932,7 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
{
int ret;
struct iwl_notification_wait wait_scan_done;
@@ -960,5 +970,8 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
*/
mvm->scan_status = IWL_MVM_SCAN_NONE;
+ if (notify)
+ ieee80211_sched_scan_stopped(mvm->hw);
+
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 88809b2d1654..7edfd15efc9d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
.sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
};
- if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
- return 0;
-
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index f339ef884250..1fb01ea2e704 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,115 +66,6 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
- struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
-{
- memset(cmd_v5, 0, sizeof(*cmd_v5));
-
- cmd_v5->add_modify = cmd_v7->add_modify;
- cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v7->sta_id;
- cmd_v5->modify_mask = cmd_v7->modify_mask;
- cmd_v5->station_flags = cmd_v7->station_flags;
- cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v7->assoc_id;
- cmd_v5->beamform_flags = cmd_v7->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
-}
-
-static void
-iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
- struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
- u32 mac_id_n_color)
-{
- memset(sta_cmd, 0, sizeof(*sta_cmd));
-
- sta_cmd->sta_id = key_cmd->sta_id;
- sta_cmd->add_modify = STA_MODE_MODIFY;
- sta_cmd->modify_mask = STA_MODIFY_KEY;
- sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
-
- sta_cmd->key.key_offset = key_cmd->key_offset;
- sta_cmd->key.key_flags = key_cmd->key_flags;
- memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
- sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
- memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
- sizeof(sta_cmd->key.tkip_rx_ttak));
-}
-
-static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v7 *cmd,
- int *status)
-{
- struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
- cmd, status);
-
- iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
- &cmd_v5, status);
-}
-
-static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v7 *cmd)
-{
- struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
- sizeof(*cmd), cmd);
-
- iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
- &cmd_v5);
-}
-
-static int
-iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_key_cmd *cmd,
- u32 mac_id_n_color,
- int *status)
-{
- struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
- sizeof(*cmd), cmd, status);
-
- iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
- return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
- &sta_cmd, status);
-}
-
-static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
- u32 flags,
- struct iwl_mvm_add_sta_key_cmd *cmd,
- u32 mac_id_n_color)
-{
- struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
- sizeof(*cmd), cmd);
-
- iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
- return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
- &sta_cmd);
-}
-
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
enum nl80211_iftype iftype)
{
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+ &add_sta_cmd, &status);
if (ret)
return ret;
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -434,7 +327,7 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
return -EINVAL;
}
- ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
sizeof(rm_sta_cmd), &rm_sta_cmd);
if (ret) {
IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
sta_id);
continue;
}
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
clear_bit(sta_id, mvm->sta_drained);
}
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* flush its queues here since we are freeing mvm_sta */
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
- /*
- * Put a non-NULL since the fw station isn't removed.
- * It will be removed after the MAC will be set as
- * unassoc.
- */
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
- ERR_PTR(-EINVAL));
-
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
} else {
spin_unlock_bh(&mvm_sta->lock);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
- rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
}
return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
return ret;
}
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
- rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
sta->sta_id = IWL_MVM_STATION_COUNT;
}
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v7 cmd;
+ struct iwl_mvm_add_sta_cmd cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
+ memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+ struct iwl_mvm_add_sta_cmd cmd = {};
int ret;
u32 status;
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ &cmd, &status);
if (ret)
return ret;
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
u32 cmd_flags)
{
- __le16 key_flags;
struct iwl_mvm_add_sta_key_cmd cmd = {};
+ __le16 key_flags;
int ret, status;
u16 keyidx;
int i;
- u32 mac_id_n_color = mvm_sta->mac_id_n_color;
keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
STA_KEY_FLG_KEYID_MSK;
@@ -1166,13 +1053,12 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
- if (cmd_flags == CMD_SYNC)
- ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
- mac_id_n_color,
- &status);
+ if (cmd_flags & CMD_ASYNC)
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
+ sizeof(cmd), &cmd);
else
- ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
- mac_id_n_color);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1225,7 +1111,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
remove_key ? "removing" : "installing",
igtk_cmd.sta_id);
- return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, CMD_SYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
sizeof(igtk_cmd), &igtk_cmd);
}
@@ -1312,15 +1198,15 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- seq.tkip.iv32, p1k, CMD_SYNC);
+ seq.tkip.iv32, p1k, 0);
break;
case WLAN_CIPHER_SUITE_CCMP:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, sta_id,
- 0, NULL, CMD_SYNC);
+ 0, NULL, 0);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf,
- sta_id, 0, NULL, CMD_SYNC);
+ sta_id, 0, NULL, 0);
}
if (ret)
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
cmd.sta_id = sta_id;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
- mvm_sta->mac_id_n_color,
- &status);
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+ &cmd, &status);
switch (status) {
case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v7 cmd = {
+ struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
bool agg)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v7 cmd = {
+ struct iwl_mvm_add_sta_cmd cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
}
- ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
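
Throughout this patch CMD_SYNC disappears: synchronous sending becomes the default (flags == 0) and CMD_ASYNC is the only explicit flag. A minimal sketch of the resulting call convention, using the two helpers seen in the hunks above; the wrapper itself is hypothetical:

/* Hypothetical wrapper illustrating the sync/async split after this
 * patch; iwl_mvm_send_cmd_pdu() and iwl_mvm_send_cmd_pdu_status()
 * are the helpers used in the hunks above. */
static int send_add_sta(struct iwl_mvm *mvm,
                        struct iwl_mvm_add_sta_cmd *cmd, bool async)
{
        u32 status = ADD_STA_SUCCESS;

        if (async)
                /* fire and forget: no status word comes back */
                return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                            sizeof(*cmd), cmd);

        /* flags == 0 now means synchronous; the _status variant
         * additionally waits for the firmware's status word */
        return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
                                           cmd, &status);
}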
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 2ed84c421481..d98e8a2142b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
* This is basically (last acked packet++).
* @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @reduced_tpc: Reduced tx power. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @state: state of the BA agreement establishment / tear down.
* @txq_id: Tx queue used by the BA session
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
u16 next_reclaimed;
/* The rest is Tx AGG related */
u32 rate_n_flags;
+ u8 reduced_tpc;
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
@@ -284,8 +287,6 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
- * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
- * by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
* @next_status_eosp: the next reclaimed packet is a PS-Poll response and
* we need to signal the EOSP
@@ -306,7 +307,6 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
- bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 61331245ad93..80100f6cc12a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true;
}
-/* used to convert from time event API v2 to v1 */
-#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
- TE_V2_EVENT_SOCIOPATHIC)
-static inline u16 te_v2_get_notify(__le16 policy)
-{
- return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
-}
-
-static inline u16 te_v2_get_dep_policy(__le16 policy)
-{
- return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
- TE_V2_PLACEMENT_POS;
-}
-
-static inline u16 te_v2_get_absence(__le16 policy)
-{
- return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
-}
-
-static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
- struct iwl_time_event_cmd_v1 *cmd_v1)
-{
- cmd_v1->id_and_color = cmd_v2->id_and_color;
- cmd_v1->action = cmd_v2->action;
- cmd_v1->id = cmd_v2->id;
- cmd_v1->apply_time = cmd_v2->apply_time;
- cmd_v1->max_delay = cmd_v2->max_delay;
- cmd_v1->depends_on = cmd_v2->depends_on;
- cmd_v1->interval = cmd_v2->interval;
- cmd_v1->duration = cmd_v2->duration;
- if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
- cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
- else
- cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
- cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
- cmd_v1->interval_reciprocal = 0; /* unused */
-
- cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
- cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
- cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
-}
-
-static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
- const struct iwl_time_event_cmd_v2 *cmd)
-{
- struct iwl_time_event_cmd_v1 cmd_v1;
-
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
- return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(*cmd), cmd);
-
- iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
- return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(cmd_v1), &cmd_v1);
-}
-
-
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
- struct iwl_time_event_cmd_v2 *te_cmd)
+ struct iwl_time_event_cmd *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);
- ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(*te_cmd), te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
lockdep_assert_held(&mvm->mutex);
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
u32 id, uid;
int ret;
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
- ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(time_cmd), &time_cmd);
if (WARN_ON(ret))
return;
}
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd_v2 time_cmd = {};
+ struct iwl_time_event_cmd time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
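
With the v2-to-v1 conversion gone, every caller sends struct iwl_time_event_cmd directly, wrapped in the notification-wait pattern visible in the hunk above. A condensed sketch of that pattern (the notif-wait calls are the real API; the glue and the timeout are illustrative):

/* Condensed sketch: register a waiter for TIME_EVENT_CMD, send the
 * command, then block until the response handler fires. */
static int send_te_and_wait(struct iwl_mvm *mvm,
                            struct iwl_time_event_cmd *cmd,
                            struct iwl_mvm_time_event_data *te_data)
{
        static const u8 te_resp[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_te;
        int ret;

        iwl_init_notification_wait(&mvm->notif_wait, &wait_te,
                                   te_resp, ARRAY_SIZE(te_resp),
                                   iwl_mvm_time_event_response, te_data);

        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
                                   sizeof(*cmd), cmd);
        if (ret) {
                iwl_remove_notification(&mvm->notif_wait, &wait_te);
                return ret;
        }

        /* timeout of one second is illustrative only */
        return iwl_wait_notification(&mvm->notif_wait, &wait_te, HZ);
}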
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 7a99fa361954..868561512783 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -409,7 +409,6 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.id = REPLY_THERMAL_MNG_BACKOFF,
.len = { sizeof(u32), },
.data = { &backoff, },
- .flags = CMD_SYNC,
};
backoff = max(backoff, mvm->thermal_throttle.min_backoff);
@@ -468,13 +467,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
if (params->support_tx_backoff) {
- tx_backoff = 0;
+ tx_backoff = tt->min_backoff;
for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
if (temperature < params->tx_backoff[i].temperature)
break;
- tx_backoff = params->tx_backoff[i].backoff;
+ tx_backoff = max(tt->min_backoff,
+ params->tx_backoff[i].backoff);
}
- if (tx_backoff != 0)
+ if (tx_backoff != tt->min_backoff)
throttle_enable = true;
if (tt->tx_backoff != tx_backoff)
iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@@ -484,7 +484,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
IWL_WARN(mvm,
"Due to high temperature thermal throttling initiated\n");
tt->throttle = true;
- } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
+ } else if (tt->throttle && !tt->dynamic_smps &&
+ tt->tx_backoff == tt->min_backoff &&
temperature <= params->tx_protection_exit) {
IWL_WARN(mvm,
"Temperature is back to normal thermal throttling stopped\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 879aeac46cc1..3846a6c41eb1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
- ieee80211_tx_status_ni(mvm->hw, skb);
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] =
+ (void *)(uintptr_t)tx_resp->reduced_tpc;
+
+ ieee80211_tx_status(mvm->hw, skb);
}
if (txq_id >= mvm->first_agg_queue) {
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
+ mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
}
rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
+ info->status.status_driver_data[0] =
+ (void *)(uintptr_t)tid_data->reduced_tpc;
}
}
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status_ni(mvm->hw, skb);
+ ieee80211_tx_status(mvm->hw, skb);
}
return 0;
@@ -951,7 +958,7 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
};
- u32 flags = sync ? CMD_SYNC : CMD_ASYNC;
+ u32 flags = sync ? 0 : CMD_ASYNC;
ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
sizeof(flush_cmd), &flush_cmd);
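
The reduced_tpc byte travels from the Tx response to later processing inside one of the void * slots of status_driver_data. Round-tripping the value through uintptr_t keeps both casts well defined; a sketch of the idiom (the two helpers are hypothetical, the driver open-codes the casts):

static inline void stash_reduced_tpc(void **slot, u8 reduced_tpc)
{
        *slot = (void *)(uintptr_t)reduced_tpc;   /* widen, then to pointer */
}

static inline u8 fetch_reduced_tpc(void *slot)
{
        return (u8)(uintptr_t)slot;               /* pointer back to integer */
}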
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 2180902266ae..aa9fc77e8413 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -64,6 +64,7 @@
#include "iwl-debug.h"
#include "iwl-io.h"
+#include "iwl-prph.h"
#include "mvm.h"
#include "fw-api-rs.h"
@@ -143,7 +144,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
"cmd flags %x", cmd->flags))
return -EINVAL;
- cmd->flags |= CMD_SYNC | CMD_WANT_SKB;
+ cmd->flags |= CMD_WANT_SKB;
ret = iwl_trans_send_cmd(mvm->trans, cmd);
if (ret == -ERFKILL) {
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ /* Do not change this output - scripts rely on it */
+
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -516,13 +519,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
{
const struct fw_img *img;
u32 ofs, sram_len;
void *sram;
- if (!mvm->ucode_loaded || mvm->fw_error_sram)
+ if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
return;
img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +542,48 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
mvm->fw_error_sram_len = sram_len;
}
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+{
+ int i, reg_val;
+ unsigned long flags;
+
+ if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+ return;
+
+ /* reading buffer size */
+ reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+ mvm->fw_error_rxf_len =
+ (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+
+ /* the register holds the value divided by 128 */
+ mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+
+ if (!mvm->fw_error_rxf_len)
+ return;
+
+ mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+ if (!mvm->fw_error_rxf) {
+ mvm->fw_error_rxf_len = 0;
+ return;
+ }
+
+ if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+ kfree(mvm->fw_error_rxf);
+ mvm->fw_error_rxf = NULL;
+ mvm->fw_error_rxf_len = 0;
+ return;
+ }
+
+ for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+ iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+ i * sizeof(u32));
+ mvm->fw_error_rxf[i] =
+ iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+ }
+ iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+#endif
+
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
@@ -553,7 +599,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = init ? CMD_SYNC : CMD_ASYNC,
+ .flags = init ? 0 : CMD_ASYNC,
.data = { lq, },
};
@@ -604,6 +650,39 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *result = _data;
+ int i;
+
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+ mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+ *result = false;
+ }
+}
+
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+{
+ bool result = true;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
+ return false;
+
+ if (!mvm->cfg->rx_with_siso_diversity)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_diversity_iter, &result);
+
+ return result;
+}
+
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool value)
{
@@ -623,7 +702,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_bt_coex_vif_change(mvm);
- return iwl_mvm_power_update_mac(mvm, vif);
+ return iwl_mvm_power_update_mac(mvm);
}
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
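
In the new RXF dump, the hardware register stores the FIFO byte count divided by 128 in a masked bit-field, so the driver recovers the length with a mask, a shift into position, and a left shift by 7. A sketch of the decode; the mask and position values below are placeholders, not the real register layout:

#define SKETCH_BYTE_CNT_MSK   0x3ff00   /* placeholder mask */
#define SKETCH_BYTE_CNT_POS   8         /* placeholder bit position */

static unsigned int rxf_len_from_reg(unsigned int reg_val)
{
        unsigned int len = (reg_val & SKETCH_BYTE_CNT_MSK) >>
                           SKETCH_BYTE_CNT_POS;

        return len << 7;   /* register holds the value divided by 128 */
}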
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3d1d57f9f5bc..7091a18d5a72 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -417,7 +417,7 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
splx->package.count != 2 ||
splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
splx->package.elements[0].integer.value != 0) {
- IWL_ERR(trans, "Unsupported splx structure");
+ IWL_ERR(trans, "Unsupported splx structure\n");
return 0;
}
@@ -426,14 +426,14 @@ static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
limits->package.count < 2 ||
limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
- IWL_ERR(trans, "Invalid limits element");
+ IWL_ERR(trans, "Invalid limits element\n");
return 0;
}
domain_type = &limits->package.elements[0];
power_limit = &limits->package.elements[1];
if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
- IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
return 0;
}
@@ -450,26 +450,26 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
pxsx_handle = ACPI_HANDLE(&pdev->dev);
if (!pxsx_handle) {
IWL_DEBUG_INFO(trans,
- "Could not retrieve root port ACPI handle");
+ "Could not retrieve root port ACPI handle\n");
return;
}
/* Get the method's handle */
status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_INFO(trans, "SPL method not found");
+ IWL_DEBUG_INFO(trans, "SPL method not found\n");
return;
}
/* Call SPLC with no arguments */
status = acpi_evaluate_object(handle, NULL, NULL, &splx);
if (ACPI_FAILURE(status)) {
- IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
return;
}
trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
- IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
trans->dflt_pwr_limit);
kfree(splx.pointer);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 9091513ea738..6c22b23a2845 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -102,7 +102,7 @@ struct iwl_rxq {
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
- int need_update;
+ bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
+static inline int iwl_queue_inc_wrap(int index)
{
- return ++index & (n_bd - 1);
+ return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
*/
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
+static inline int iwl_queue_dec_wrap(int index)
{
- return --index & (n_bd - 1);
+ return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
struct iwl_cmd_meta {
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
*
* Contains common data for Rx and Tx queues.
*
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
* there might be HW changes in the future). For the normal TX
* queues, n_window, which is the size of the software queue data
* is also 256; however, for the command queue, n_window is only
* 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
* iwl_txq) only have 32 entries, while the HW buffers (@tfds in
* the same struct) have 256.
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
* data is a window overlayed over the HW queue.
*/
struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
/* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
- u8 need_update;
+ bool need_update;
u8 active;
bool ampdu;
};
@@ -270,6 +267,9 @@ struct iwl_trans_pcie {
struct iwl_trans *trans;
struct iwl_drv *drv;
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
@@ -362,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -370,6 +370,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
/*****************************************************
* Error handling
******************************************************/
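
Dropping n_bd works because the queue size is a compile-time power of two: masking with (size - 1) replaces the modulo, and two's-complement arithmetic makes the same mask correct for decrement past zero. A self-contained sketch (SKETCH_SIZE stands in for TFD_QUEUE_SIZE_MAX, which is 256):

#define SKETCH_SIZE 256   /* stand-in for TFD_QUEUE_SIZE_MAX */

static inline int inc_wrap(int index)
{
        return ++index & (SKETCH_SIZE - 1);   /* 255 -> 0 */
}

static inline int dec_wrap(int index)
{
        return --index & (SKETCH_SIZE - 1);   /* 0 -> 255 */
}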
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fdfa3969cac9..a2698e5e062c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;
- spin_lock(&rxq->lock);
-
- if (rxq->need_update == 0)
- goto exit_unlock;
+ lockdep_assert_held(&rxq->lock);
/*
* explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
+ rxq->need_update = true;
+ return;
}
}
rxq->write_actual = round_down(rxq->write, 8);
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- rxq->need_update = 0;
+}
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+ spin_lock(&rxq->lock);
+
+ if (!rxq->need_update)
+ goto exit_unlock;
+
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ rxq->need_update = false;
exit_unlock:
spin_unlock(&rxq->lock);
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock(&rxq->lock);
- rxq->need_update = 1;
+ iwl_pcie_rxq_inc_wr_ptr(trans);
spin_unlock(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
* Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except for during initialization)
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
- spin_lock(&trans_pcie->irq_lock);
- iwl_pcie_rxq_restock(trans);
- spin_unlock(&trans_pcie->irq_lock);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
{
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, gfp);
iwl_pcie_rxq_restock(trans);
}
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_pcie_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans);
+ iwl_pcie_rx_replenish(trans, GFP_KERNEL);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock(&trans_pcie->irq_lock);
- rxq->need_update = 1;
- iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans);
+ spin_unlock(&rxq->lock);
return 0;
}
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock(&rxq->lock);
}
/*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
u32 count = 8;
int total_empty;
+restart:
+ spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_pcie_rx_replenish_now(trans);
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
count = 0;
+ goto restart;
}
}
}
/* Backtrack one entry */
rxq->read = i;
+ spin_unlock(&rxq->lock);
+
if (fill_rx)
- iwl_pcie_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
else
iwl_pcie_rxq_restock(trans);
+
+ if (trans_pcie->napi.poll)
+ napi_gro_flush(&trans_pcie->napi, false);
}
/*
@@ -844,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
trans_pcie->ict_index, read);
trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+ ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
isr_stats->wakeup++;
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_pcie_rx_handle(trans);
-
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans);
+ local_bh_enable();
}
/* This "Tx" DMA channel is used only for loading uCode */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 2365553f1ef7..788085bc65d7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -73,6 +73,7 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
#include "internal.h"
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -103,7 +104,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -454,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
+ int iter;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
@@ -462,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
if (ret >= 0)
return 0;
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ for (iter = 0; iter < 10; iter++) {
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ do {
+ ret = iwl_pcie_set_hw_ready(trans);
+ if (ret >= 0)
+ return 0;
- do {
- ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ usleep_range(200, 1000);
+ t += 200;
+ } while (t < 150000);
+ msleep(25);
+ }
- usleep_range(200, 1000);
- t += 200;
- } while (t < 150000);
+ IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
return ret;
}
@@ -1053,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
+}
+
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -1079,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->command_names = trans_cfg->command_names;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ * As this function may be called again in some corner cases don't
+ * do anything if NAPI was already initialized.
+ */
+ if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ init_dummy_netdev(&trans_pcie->napi_dev);
+ iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ &trans_pcie->napi_dev,
+ iwl_pcie_dummy_napi_poll, 64);
+ }
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1099,6 +1123,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
pci_disable_device(trans_pcie->pci_dev);
kmem_cache_destroy(trans->dev_cmd_pool);
+ if (trans_pcie->napi.poll)
+ netif_napi_del(&trans_pcie->napi);
+
kfree(trans);
}
@@ -1237,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
@@ -1250,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u8 wr_ptr;
+
if (cnt == trans_pcie->cmd_queue)
continue;
+ if (!test_bit(cnt, trans_pcie->queue_used))
+ continue;
+ if (!(BIT(cnt) & txq_bm))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
txq = &trans_pcie->txq[cnt];
q = &txq->q;
- while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+ while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ !time_after(jiffies,
+ now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+ if (WARN_ONCE(wr_ptr != write_ptr,
+ "WR pointer moved while flushing %d -> %d\n",
+ wr_ptr, write_ptr))
+ return -ETIMEDOUT;
msleep(1);
+ }
if (q->read_ptr != q->write_ptr) {
IWL_ERR(trans,
@@ -1264,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
ret = -ETIMEDOUT;
break;
}
+ IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
if (!ret)
@@ -1298,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
cnt, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
}
@@ -1630,6 +1676,61 @@ err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+ u32 cmdlen = 0;
+ int i;
+
+ for (i = 0; i < IWL_NUM_OF_TBS; i++)
+ cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+ return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ void *buf, u32 buflen)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ u32 len;
+ int i, ptr;
+
+ if (!buf)
+ return sizeof(*data) +
+ cmdq->q.n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
+
+ len = 0;
+ data = buf;
+ data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+ txcmd = (void *)data->data;
+ spin_lock_bh(&cmdq->lock);
+ ptr = cmdq->q.write_ptr;
+ for (i = 0; i < cmdq->q.n_window; i++) {
+ u8 idx = get_cmd_index(&cmdq->q, ptr);
+ u32 caplen, cmdlen;
+
+ cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+ caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+ if (cmdlen) {
+ len += sizeof(*txcmd) + caplen;
+ txcmd->cmdlen = cpu_to_le32(cmdlen);
+ txcmd->caplen = cpu_to_le32(caplen);
+ memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+ txcmd = (void *)((u8 *)txcmd->data + caplen);
+ }
+
+ ptr = iwl_queue_dec_wrap(ptr);
+ }
+ spin_unlock_bh(&cmdq->lock);
+
+ data->len = cpu_to_le32(len);
+ return sizeof(*data) + len;
+}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
struct dentry *dir)
@@ -1672,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.grab_nic_access = iwl_trans_pcie_grab_nic_access,
.release_nic_access = iwl_trans_pcie_release_nic_access,
.set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .dump_data = iwl_trans_pcie_dump_data,
+#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
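
The prepare-card rework wraps the existing poll in an outer loop of up to ten attempts, re-arming the PREPARE bit each time and pausing 25 ms between attempts; as in the hunk, the elapsed-time counter accumulates across attempts. A sketch with stubbed hardware accessors (only usleep_range()/msleep() are real kernel calls here):

#include <linux/delay.h>    /* usleep_range(), msleep() */
#include <linux/errno.h>    /* ETIMEDOUT */

static bool hw_ready(void)        { return false; }   /* stub: poll ready bit */
static void arm_prepare_bit(void) { }                 /* stub: set PREPARE */

static int prepare_card(void)
{
        int iter, t = 0;

        for (iter = 0; iter < 10; iter++) {
                arm_prepare_bit();
                do {
                        if (hw_ready())
                                return 0;
                        usleep_range(200, 1000);
                        t += 200;
                } while (t < 150000);
                msleep(25);
        }
        return -ETIMEDOUT;
}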
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3b0c72c10054..038940afbdc5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -70,20 +70,20 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* To avoid ambiguity between empty and completely full queues, there
- * should always be less than q->n_bd elements in the queue.
- * If q->n_window is smaller than q->n_bd, there is no need to reserve
- * any queue entries for this purpose.
+ * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
+ * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
+ * to reserve any queue entries for this purpose.
*/
- if (q->n_window < q->n_bd)
+ if (q->n_window < TFD_QUEUE_SIZE_MAX)
max = q->n_window;
else
- max = q->n_bd - 1;
+ max = TFD_QUEUE_SIZE_MAX - 1;
/*
- * q->n_bd is a power of 2, so the following is equivalent to modulo by
- * q->n_bd and is well defined for negative dividends.
+ * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
+ * modulo by TFD_QUEUE_SIZE_MAX and is well defined.
*/
- used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
+ used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
if (WARN_ON(used > max))
return 0;
@@ -94,17 +94,11 @@ static int iwl_queue_space(const struct iwl_queue *q)
/*
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
{
- q->n_bd = count;
q->n_window = slots_num;
q->id = id;
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
-
/* slots_num must be power-of-two size, otherwise
* get_cmd_index is broken. */
if (WARN_ON(!is_power_of_2(slots_num)))
@@ -197,17 +191,17 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans,
"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
}
for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i, q->n_bd))
+ i = iwl_queue_inc_wrap(i))
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(trans);
}
/*
@@ -287,14 +281,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->q.id;
- if (txq->need_update == 0)
- return;
+ lockdep_assert_held(&txq->lock);
/*
* explicitly wake up the NIC if:
@@ -317,6 +311,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ txq->need_update = true;
return;
}
}
@@ -327,8 +322,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
*/
IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = &trans_pcie->txq[i];
- txq->need_update = 0;
+ spin_lock_bh(&txq->lock);
+ if (trans_pcie->txq[i].need_update) {
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ trans_pcie->txq[i].need_update = false;
+ }
+ spin_unlock_bh(&txq->lock);
+ }
}
static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -343,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
return addr;
}
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
dma_addr_t addr, u16 len)
{
@@ -409,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
- /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
int rd_ptr = txq->q.read_ptr;
int idx = get_cmd_index(&txq->q, rd_ptr);
lockdep_assert_held(&txq->lock);
- /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
/* free SKB */
@@ -436,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
}
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
- dma_addr_t addr, u16 len, u8 reset)
+ dma_addr_t addr, u16 len, bool reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
@@ -542,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
{
int ret;
- txq->need_update = 0;
+ txq->need_update = false;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
/* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
+ ret = iwl_queue_init(&txq->q, slots_num, txq_id);
if (ret)
return ret;
@@ -575,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
- if (!q->n_bd)
- return;
-
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, q->read_ptr);
iwl_pcie_txq_free_tfd(trans, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
}
txq->active = false;
spin_unlock_bh(&txq->lock);
@@ -620,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
}
/* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, sizeof(struct iwl_tfd) *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+ txq->tfds, txq->q.dma_addr);
txq->q.dma_addr = 0;
+ txq->tfds = NULL;
dma_free_coherent(dev,
sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -680,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+ if (trans->cfg->base_params->scd_chain_ext_wa)
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
@@ -931,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
- /* n_bd is usually 256 => n_bd - 1 = 0xff */
- int tfd_num = ssn & (txq->q.n_bd - 1);
+ int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
struct iwl_queue *q = &txq->q;
int last_to_free;
@@ -956,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+ last_to_free = iwl_queue_dec_wrap(tfd_num);
if (!iwl_queue_used(q, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
+ __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
goto out;
}
@@ -971,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
for (;
q->read_ptr != tfd_num;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
continue;
@@ -1010,25 +1015,26 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
lockdep_assert_held(&txq->lock);
- if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+ if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, q->n_bd,
+ __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
q->write_ptr, q->read_ptr);
return;
}
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(trans);
}
}
- if (q->read_ptr == q->write_ptr) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ q->read_ptr == q->write_ptr) {
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
WARN_ON(!trans_pcie->cmd_in_flight);
trans_pcie->cmd_in_flight = false;
@@ -1309,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
cmd_pos = offsetof(struct iwl_device_cmd, payload);
copy_size = sizeof(out_cmd->hdr);
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- int copy = 0;
+ int copy;
if (!cmd->len[i])
continue;
- /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
- if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
- copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
-
- if (copy > cmd->len[i])
- copy = cmd->len[i];
- }
-
/* copy everything if not nocopy/dup */
if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
- IWL_HCMD_DFL_DUP)))
+ IWL_HCMD_DFL_DUP))) {
copy = cmd->len[i];
- if (copy) {
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
cmd_pos += copy;
copy_size += copy;
+ continue;
+ }
+
+ /*
+ * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+ * in total (for the scratchbuf handling), but copy up to what
+ * we can fit into the payload for debug dump purposes.
+ */
+ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+ memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+ cmd_pos += copy;
+
+ /* However, treat copy_size the proper way, we need it below */
+ if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+ copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+ if (copy > cmd->len[i])
+ copy = cmd->len[i];
+ copy_size += copy;
}
}
@@ -1345,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
iwl_pcie_txq_build_tfd(trans, txq,
iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
- scratch_size, 1);
+ scratch_size, true);
/* map first command fragment, if any remains */
if (copy_size > scratch_size) {
@@ -1361,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
- copy_size - scratch_size, 0);
+ copy_size - scratch_size, false);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1384,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
- iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
}
out_meta->flags = cmd->flags;
@@ -1392,8 +1409,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
kfree(txq->entries[idx].free_buf);
txq->entries[idx].free_buf = dup_buf;
- txq->need_update = 1;
-
trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
/* start timer if queue currently empty */
@@ -1405,9 +1420,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
- * returned.
+ * returned. This needs to be done only on NICs that have
+ * apmg_wake_up_wa set.
*/
- if (!trans_pcie->cmd_in_flight) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_in_flight) {
trans_pcie->cmd_in_flight = true;
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1427,7 +1444,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1583,7 +1600,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
- iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
+ iwl_force_nmi(trans);
iwl_trans_fw_error(trans);
goto cancel;
@@ -1661,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
void *tb1_addr;
u16 len, tb1_len, tb2_len;
- u8 wait_write_ptr = 0;
+ bool wait_write_ptr;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
u16 wifi_seq;
@@ -1722,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
IWL_HCMD_SCRATCHBUF_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
- IWL_HCMD_SCRATCHBUF_SIZE, 1);
+ IWL_HCMD_SCRATCHBUF_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
@@ -1732,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
goto out_err;
- iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+ iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
/*
* Set up TFD's third entry to point directly to remainder
@@ -1748,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
&txq->tfds[q->write_ptr]);
goto out_err;
}
- iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
+ iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
}
/* Set up entry for this TFD in Tx byte-count array */
@@ -1762,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx_data(trans->dev, skb,
skb->data + hdr_len, tb2_len);
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
+ wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1775,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+ if (!wait_write_ptr)
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
+ * and we will get a TX status notification eventually.
*/
if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
+ if (wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
- } else {
+ else
iwl_stop_queue(trans, txq);
- }
}
spin_unlock(&txq->lock);
return 0;
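
With n_bd gone, iwl_queue_space() reduces to a masked subtraction against the fixed queue size; the mask keeps the result well defined even when the write pointer has wrapped below the read pointer. A standalone sketch (SKETCH_SIZE stands in for TFD_QUEUE_SIZE_MAX):

#define SKETCH_SIZE 256   /* stand-in for TFD_QUEUE_SIZE_MAX */

static int queue_space(int write_ptr, int read_ptr, int n_window)
{
        /* reserve one slot unless the window is already smaller */
        int max = n_window < SKETCH_SIZE ? n_window : SKETCH_SIZE - 1;
        int used = (write_ptr - read_ptr) & (SKETCH_SIZE - 1);

        return used > max ? 0 : max - used;
}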
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 54e344aed6e0..47a998d8f99e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1006,9 +1006,8 @@ struct cmd_key_material {
} __packed;
static int lbs_set_key_material(struct lbs_private *priv,
- int key_type,
- int key_info,
- u8 *key, u16 key_len)
+ int key_type, int key_info,
+ const u8 *key, u16 key_len)
{
struct cmd_key_material cmd;
int ret;
@@ -1610,7 +1609,7 @@ static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
*/
static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct lbs_private *priv = wiphy_priv(wiphy);
s8 signal, noise;
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index ab966f08024a..407784aca627 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -90,7 +90,8 @@ do { if ((lbs_debug & (grp)) == (grp)) \
#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
#ifdef DEBUG
-static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
+static inline void lbs_deb_hex(unsigned int grp, const char *prompt,
+ const u8 *buf, int len)
{
int i = 0;
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index c7366b07b568..e446fed7b345 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -71,8 +71,10 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
skb->ip_summed = CHECKSUM_NONE;
- if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
- return process_rxed_802_11_packet(priv, skb);
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ ret = process_rxed_802_11_packet(priv, skb);
+ goto done;
+ }
p_rx_pd = (struct rxpd *) skb->data;
p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
@@ -86,7 +88,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
dev->stats.rx_length_errors++;
- ret = 0;
+ ret = -EINVAL;
dev_kfree_skb(skb);
goto done;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d7a52f5a410..a312c653d116 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
/* Not implemented, queues only on kernel side */
}
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index c92f27aa71ed..706831df1fa2 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
memcpy((u8 *)vht_op +
sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_oper +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_oper,
le16_to_cpu(vht_op->header.len));
/* negotiate the channel width and central freq
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index d14ead8beca8..e1c2f67ae85e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
memcpy((u8 *) ht_info +
sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_oper +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_oper,
le16_to_cpu(ht_info->header.len));
if (!(sband->ht_cap.cap &
@@ -750,3 +749,45 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
return;
}
+
+u8 mwifiex_get_sec_chan_offset(int chan)
+{
+ u8 sec_offset;
+
+ switch (chan) {
+ case 36:
+ case 44:
+ case 52:
+ case 60:
+ case 100:
+ case 108:
+ case 116:
+ case 124:
+ case 132:
+ case 140:
+ case 149:
+ case 157:
+ sec_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ break;
+ case 40:
+ case 48:
+ case 56:
+ case 64:
+ case 104:
+ case 112:
+ case 120:
+ case 128:
+ case 136:
+ case 144:
+ case 153:
+ case 161:
+ sec_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ break;
+ case 165:
+ default:
+ sec_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ break;
+ }
+
+ return sec_offset;
+}
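
The switch encodes the fixed 5 GHz HT40 pairing: the lower channel of each 40 MHz pair reports its secondary above, the upper one below, and channel 165 has no pair. One illustrative use is deriving the 40 MHz segment center; the helper below is hypothetical, only the offset constants and the function above come from the patch:

/* Hypothetical: 5 GHz channels are 5 MHz apart, so the 40 MHz
 * center sits two channel numbers away from the primary. */
static int ht40_center_chan(int primary)
{
        switch (mwifiex_get_sec_chan_offset(primary)) {
        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
                return primary + 2;   /* secondary 20 MHz above */
        case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
                return primary - 2;   /* secondary 20 MHz below */
        default:
                return primary;       /* no 40 MHz pair (e.g. channel 165) */
        }
}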
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 40b007a00f4b..0b73fa08f5d4 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -63,6 +63,7 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
int cmd_action,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
+u8 mwifiex_get_sec_chan_offset(int chan);
static inline u8
mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
@@ -199,7 +200,7 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
}
static inline u8
-mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, const u8 *ra)
{
struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
if (node)
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 63211707f939..5b32106182f8 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -100,6 +100,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
struct sk_buff *skb)
{
struct txpd *local_tx_pd;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
skb_push(skb, sizeof(*local_tx_pd));
@@ -118,6 +119,9 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
sizeof(*local_tx_pd));
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
if (local_tx_pd->tx_control == 0)
/* TxCtrl set by user or default */
local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
@@ -160,6 +164,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
int pad = 0, ret;
struct mwifiex_tx_param tx_param;
struct txpd *ptx_pd = NULL;
+ struct timeval tv;
int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
skb_src = skb_peek(&pra_list->skb_head);
@@ -182,8 +187,14 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->bss_type = tx_info_src->bss_type;
tx_info_aggr->bss_num = tx_info_src->bss_num;
+
+ if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
skb_aggr->priority = skb_src->priority;
+ do_gettimeofday(&tv);
+ skb_aggr->tstamp = timeval_to_ktime(tv);
+
do {
/* Check if AMSDU can accommodate this MSDU */
if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +247,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
skb_aggr, NULL);
} else {
- /*
- * Padding per MSDU will affect the length of next
- * packet and hence the exact length of next packet
- * is uncertain here.
- *
- * Also, aggregation of transmission buffer, while
- * downloading the data to the card, wont gain much
- * on the AMSDU packets as the AMSDU packets utilizes
- * the transmission buffer space to the maximum
- * (adapter->tx_buf_size).
- */
- tx_param.next_pkt_len = 0;
+ if (skb_src)
+ tx_param.next_pkt_len =
+ skb_src->len + sizeof(struct txpd);
+ else
+ tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb_aggr, &tx_param);
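
The intent of the hunk above, as a minimal sketch: skb_src is whatever skb_peek() left on the queue once aggregation stopped, so the bus driver can be told how large the next transfer will be (zero if nothing follows):

	/* Sketch: hint host_to_card() about the next packet so the
	 * SDIO/PCIe layer can decide whether to keep aggregating. */
	tx_param.next_pkt_len = skb_src ?
				skb_src->len + sizeof(struct txpd) : 0;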
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index b9242c3dca43..3b55ce5690a5 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -200,4 +200,11 @@ getlog
cat getlog
+fw_dump
+ This command is used to dump firmware memory into files.
+	A separate file will be created for each memory segment.
+ Usage:
+
+ cat fw_dump
+
===============================================================================
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 21ee27ab7b74..e95dec91a561 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -994,7 +994,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
*/
static int
mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -1270,7 +1270,7 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac)
+ const u8 *mac)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_sta_node *sta_node;
@@ -2629,7 +2629,7 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, u8 action_code, u8 dialog_token,
+ const u8 *peer, u8 action_code, u8 dialog_token,
u16 status_code, u32 peer_capability,
const u8 *extra_ies, size_t extra_ies_len)
{
@@ -2701,7 +2701,7 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
static int
mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, enum nl80211_tdls_operation action)
+ const u8 *peer, enum nl80211_tdls_operation action)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2748,9 +2748,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
}
static int
-mwifiex_cfg80211_add_station(struct wiphy *wiphy,
- struct net_device *dev,
- u8 *mac, struct station_parameters *params)
+mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_parameters *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
@@ -2765,9 +2764,9 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy,
}
static int
-mwifiex_cfg80211_change_station(struct wiphy *wiphy,
- struct net_device *dev,
- u8 *mac, struct station_parameters *params)
+mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
{
int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1062c918a7bf..8dee6c86f4f1 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->cmd_wait_q.status = -ETIMEDOUT;
wake_up_interruptible(&adapter->cmd_wait_q.wait);
mwifiex_cancel_pending_ioctl(adapter);
- /* reset cmd_sent flag to unblock new commands */
- adapter->cmd_sent = false;
}
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index b8a49aad12fd..7b419bbcd544 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -257,6 +257,29 @@ free_and_exit:
}
/*
+ * Proc firmware dump read handler.
+ *
+ * This function is called when the 'fw_dump' file is opened for
+ * reading.
+ * This function dumps firmware memory into different files
+ * (e.g. DTCM, ITCM, SQRAM) based on the memory segments, for
+ * debugging.
+ */
+static ssize_t
+mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = file->private_data;
+
+ if (!priv->adapter->if_ops.fw_dump)
+ return -EIO;
+
+ priv->adapter->if_ops.fw_dump(priv->adapter);
+
+ return 0;
+}
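
For context, the MWIFIEX_DFS_FILE_READ_OPS(fw_dump) instantiation below wires this handler into a file_operations instance; roughly (a sketch based on the macro pattern visible in a later hunk — exact fields may differ):

	static const struct file_operations mwifiex_dfs_fw_dump_fops = {
		.read  = mwifiex_fw_dump_read,
		.open  = simple_open,
		.owner = THIS_MODULE,
	};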
+
+/*
* Proc getlog file read handler.
*
* This function is called when the 'getlog' file is opened for reading
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
MWIFIEX_DFS_FILE_READ_OPS(info);
MWIFIEX_DFS_FILE_READ_OPS(debug);
MWIFIEX_DFS_FILE_READ_OPS(getlog);
+MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
MWIFIEX_DFS_FILE_OPS(regrdwr);
MWIFIEX_DFS_FILE_OPS(rdeeprom);
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(getlog);
MWIFIEX_DFS_ADD_FILE(regrdwr);
MWIFIEX_DFS_ADD_FILE(rdeeprom);
+ MWIFIEX_DFS_ADD_FILE(fw_dump);
}
/*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index e7b3e16e5d34..38da6ff6f416 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -42,12 +42,12 @@
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
-#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 16
-#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 32
+#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE 64
+#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE 64
#define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE 32
#define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE 16
-#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 32
-#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 48
+#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE 64
+#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE 64
#define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE 48
#define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE 32
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index b485dc1ae5eb..3175dd04834b 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,6 +169,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -229,6 +230,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
+#define ISALLOWED_CHANWIDTH40(ht_param) (ht_param & BIT(2))
/* httxcfg bitmap
* 0 reserved
@@ -403,7 +405,7 @@ enum P2P_MODES {
#define HS_CFG_CANCEL 0xffffffff
#define HS_CFG_COND_DEF 0x00000000
#define HS_CFG_GPIO_DEF 0xff
-#define HS_CFG_GAP_DEF 0
+#define HS_CFG_GAP_DEF 0xff
#define HS_CFG_COND_BROADCAST_DATA 0x00000001
#define HS_CFG_COND_UNICAST_DATA 0x00000002
#define HS_CFG_COND_MAC_EVENT 0x00000004
@@ -487,6 +489,7 @@ enum P2P_MODES {
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
+#define EVENT_TDLS_GENERIC_EVENT 0x00000052
#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
@@ -519,6 +522,7 @@ enum P2P_MODES {
#define ACT_TDLS_DELETE 0x00
#define ACT_TDLS_CREATE 0x01
#define ACT_TDLS_CONFIG 0x02
+#define TDLS_EVENT_LINK_TEAR_DOWN 3
#define MWIFIEX_FW_V15 15
@@ -535,6 +539,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
+#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
struct txpd {
u8 bss_type;
@@ -577,7 +582,7 @@ struct rxpd {
* [Bit 7] Reserved
*/
u8 ht_info;
- u8 reserved;
+ u8 flags;
} __packed;
struct uap_txpd {
@@ -708,6 +713,13 @@ struct mwifiex_ie_types_vendor_param_set {
u8 ie[MWIFIEX_MAX_VSIE_LEN];
};
+#define MWIFIEX_TDLS_IDLE_TIMEOUT 60
+
+struct mwifiex_ie_types_tdls_idle_timeout {
+ struct mwifiex_ie_types_header header;
+ __le16 value;
+} __packed;
+
struct mwifiex_ie_types_rsn_param_set {
struct mwifiex_ie_types_header header;
u8 rsn_ie[1];
@@ -1745,6 +1757,15 @@ struct host_cmd_ds_802_11_subsc_evt {
__le16 events;
} __packed;
+struct mwifiex_tdls_generic_event {
+ __le16 type;
+ u8 peer_mac[ETH_ALEN];
+ union {
+ __le16 reason_code;
+ __le16 reserved;
+ } u;
+} __packed;
+
struct mwifiex_ie {
__le16 ie_index;
__le16 mgmt_subtype_mask;
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index ee494db54060..1b576722671d 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
u32 rx_ant;
};
-#define MWIFIEX_NUM_OF_CMD_BUFFER 20
+#define MWIFIEX_NUM_OF_CMD_BUFFER 50
#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
enum {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c771b3e9918..cbabc12fbda3 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -521,7 +521,6 @@ done:
release_firmware(adapter->firmware);
adapter->firmware = NULL;
}
- complete(&adapter->fw_load);
if (init_failed)
mwifiex_free_adapter(adapter);
up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
{
int ret;
- init_completion(&adapter->fw_load);
ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
adapter->dev, GFP_KERNEL, adapter,
mwifiex_fw_dpc);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d53e1e8c9467..1398afa84064 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
+ void (*fw_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
};
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
spinlock_t queue_lock; /* lock for tx queues */
- struct completion fw_load;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
u8 scan_delay_cnt;
@@ -910,8 +910,6 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
struct sk_buff *skb);
int mwifiex_process_sta_event(struct mwifiex_private *);
int mwifiex_process_uap_event(struct mwifiex_private *);
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
void mwifiex_delete_all_station_list(struct mwifiex_private *priv);
void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb);
void *mwifiex_process_uap_txpd(struct mwifiex_private *, struct sk_buff *skb);
@@ -1101,7 +1099,7 @@ mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
return 0;
/* Clear csa channel, if DFS channel move time has passed */
- if (jiffies > priv->csa_expire_time) {
+ if (time_after(jiffies, priv->csa_expire_time)) {
priv->csa_chan = 0;
priv->csa_expire_time = 0;
}
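
time_after() is used here because jiffies wraps around; a minimal sketch of the difference, assuming only standard <linux/jiffies.h> semantics:

	#include <linux/jiffies.h>

	/* A raw 'jiffies > deadline' misorders timestamps that straddle
	 * the counter wrap; time_after() compares via signed subtraction
	 * and stays correct across it. */
	static bool example_deadline_passed(unsigned long deadline)
	{
		return time_after(jiffies, deadline);
	}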
@@ -1220,26 +1218,26 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac);
void
mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
int ies_len, struct mwifiex_sta_node *node);
struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
size_t extra_ies_len);
-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, const u8 *extra_ies,
- size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
u8 *buf, int len);
-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac);
void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a7e8b96b2d90..574d4b597468 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW load is in progress */
- wait_for_completion(&adapter->fw_load);
-
if (user_rmmod) {
#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
@@ -1074,6 +1071,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
* is mapped to PCI device memory. Tx ring pointers are advanced accordingly.
 * Download ready interrupt to FW is deferred if Tx ring is not full and
 * additional payload can be accommodated.
+ * The caller must ensure the tx_param argument to this function is not NULL.
*/
static int
mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 7b3af3d29ded..45c5b3450cf5 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -29,9 +29,6 @@
#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
-#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
-#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
-#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
/* Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
/*
 * In the associated state we will reduce the number of channels scanned per
- * scan command to avoid any traffic delay/loss. This number is decided
- * based on total number of channels to be scanned due to constraints
- * of command buffers.
+ * scan command to 1 to avoid any traffic delay/loss.
*/
- if (priv->media_connected) {
- if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+ if (priv->media_connected)
*max_chan_per_scan = 1;
- else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
- *max_chan_per_scan = 2;
- else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
- *max_chan_per_scan = 3;
- else
- *max_chan_per_scan = 4;
- }
}
/*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
- bss_entry->bcn_bss_co_2040 = current_ptr +
- sizeof(struct ieee_types_header);
- bss_entry->bss_co_2040_offset = (u16) (current_ptr +
- sizeof(struct ieee_types_header) -
- bss_entry->beacon_buf);
+ bss_entry->bcn_bss_co_2040 = current_ptr;
+ bss_entry->bss_co_2040_offset =
+ (u16) (current_ptr - bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
- bss_entry->bcn_ext_cap = current_ptr +
- sizeof(struct ieee_types_header);
- bss_entry->ext_cap_offset = (u16) (current_ptr +
- sizeof(struct ieee_types_header) -
- bss_entry->beacon_buf);
+ bss_entry->bcn_ext_cap = current_ptr;
+ bss_entry->ext_cap_offset =
+ (u16) (current_ptr - bss_entry->beacon_buf);
break;
case WLAN_EID_OPMODE_NOTIF:
- bss_entry->oper_mode =
- (void *)(current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->oper_mode = (void *)current_ptr;
bss_entry->oper_mode_offset =
(u16)((u8 *)bss_entry->oper_mode -
bss_entry->beacon_buf);
@@ -1757,6 +1738,19 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
return 0;
}
+static void mwifiex_complete_scan(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ if (adapter->curr_cmd->wait_q_enabled) {
+ adapter->cmd_wait_q.status = 0;
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev, "complete internal scan\n");
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ }
+ }
+}
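
How the factored-out helper is reached after this patch (a flow summary using only the names visible in the hunks below):

	/* Legacy scan:   mwifiex_check_next_scan_command()
	 *                  -> mwifiex_complete_scan()   (ext_scan not set)
	 * Extended scan: completion is deferred to the firmware response,
	 *                mwifiex_ret_802_11_scan_ext()
	 *                  -> mwifiex_complete_scan()
	 */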
+
static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1770,16 +1764,9 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
- /* Need to indicate IOCTL complete */
- if (adapter->curr_cmd->wait_q_enabled) {
- adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
- }
+ if (!adapter->ext_scan)
+ mwifiex_complete_scan(priv);
+
if (priv->report_scan_result)
priv->report_scan_result = false;
@@ -1984,6 +1971,9 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
{
dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+
+ mwifiex_complete_scan(priv);
+
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d206f04d4994..4ce3d7b33991 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
card->supports_sdio_new_mode = data->supports_sdio_new_mode;
card->has_control_mask = data->has_control_mask;
card->tx_buf_size = data->tx_buf_size;
+ card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
+ card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
}
sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW load is in progress */
- wait_for_completion(&adapter->fw_load);
-
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
@@ -1679,8 +1678,12 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (ret) {
if (type == MWIFIEX_TYPE_CMD)
adapter->cmd_sent = false;
- if (type == MWIFIEX_TYPE_DATA)
+ if (type == MWIFIEX_TYPE_DATA) {
adapter->data_sent = false;
+ /* restore curr_wr_port in error cases */
+ card->curr_wr_port = port;
+ card->mp_wr_bitmap |= (u32)(1 << card->curr_wr_port);
+ }
} else {
if (type == MWIFIEX_TYPE_DATA) {
if (!(card->mp_wr_bitmap & (1 << card->curr_wr_port)))
@@ -1842,8 +1845,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
card->mp_agg_pkt_limit, GFP_KERNEL);
ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
- SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
- SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
+ card->mp_tx_agg_buf_size,
+ card->mp_rx_agg_buf_size);
if (ret) {
dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
kfree(card->mp_regs);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index c71201b2e2a3..6eea30b43ed7 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -64,10 +64,8 @@
#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */
-
-/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (16384) /* 16K */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384)
+#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768)
/* Misc. Config Register : Auto Re-enable interrupts */
#define AUTO_RE_ENABLE_INT BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
bool supports_sdio_new_mode;
bool has_control_mask;
u16 tx_buf_size;
+ u32 mp_tx_agg_buf_size;
+ u32 mp_rx_agg_buf_size;
u32 mp_rd_bitmap;
u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
bool supports_sdio_new_mode;
bool has_control_mask;
u16 tx_buf_size;
+ u32 mp_tx_agg_buf_size;
+ u32 mp_rx_agg_buf_size;
};
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
.supports_sdio_new_mode = false,
.has_control_mask = true,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
.supports_sdio_new_mode = true,
.has_control_mask = false,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
};
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index e3cac1495cc7..88202ce0c139 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1546,6 +1546,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
struct mwifiex_ie_types_extcap *extcap;
struct mwifiex_ie_types_vhtcap *vht_capab;
struct mwifiex_ie_types_aid *aid;
+ struct mwifiex_ie_types_tdls_idle_timeout *timeout;
u8 *pos, qos_info;
u16 config_len = 0;
struct station_parameters *params = priv->sta_params;
@@ -1643,6 +1644,12 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
config_len += sizeof(struct mwifiex_ie_types_aid);
}
+ timeout = (void *)(pos + config_len);
+ timeout->header.type = cpu_to_le16(TLV_TYPE_TDLS_IDLE_TIMEOUT);
+ timeout->header.len = cpu_to_le16(sizeof(timeout->value));
+ timeout->value = cpu_to_le16(MWIFIEX_TDLS_IDLE_TIMEOUT);
+ config_len += sizeof(struct mwifiex_ie_types_tdls_idle_timeout);
+
break;
default:
dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index bfebb0144df5..577f2979ed8f 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -865,14 +865,20 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
switch (action) {
case ACT_TDLS_DELETE:
- if (reason)
- dev_err(priv->adapter->dev,
- "TDLS link delete for %pM failed: reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
- else
+ if (reason) {
+ if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
+ dev_dbg(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ else
+ dev_err(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ } else {
dev_dbg(priv->adapter->dev,
- "TDLS link config for %pM successful\n",
+ "TDLS link delete for %pM successful\n",
cmd_tdls_oper->peer_mac);
+ }
break;
case ACT_TDLS_CREATE:
if (reason) {
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 368450cc56c7..f6395ef11a72 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -134,6 +134,46 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
netif_carrier_off(priv->netdev);
}
+static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
+ struct sk_buff *event_skb)
+{
+ int ret = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_tdls_generic_event *tdls_evt =
+ (void *)event_skb->data + sizeof(adapter->event_cause);
+
+	/* The 2 reserved bytes are not mandatory in the TDLS event */
+ if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
+ sizeof(u16) - sizeof(adapter->event_cause))) {
+ dev_err(adapter->dev, "Invalid event length!\n");
+ return -1;
+ }
+
+ sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
+ if (!sta_ptr) {
+ dev_err(adapter->dev, "cannot get sta entry!\n");
+ return -1;
+ }
+
+ switch (le16_to_cpu(tdls_evt->type)) {
+ case TDLS_EVENT_LINK_TEAR_DOWN:
+ cfg80211_tdls_oper_request(priv->netdev,
+ tdls_evt->peer_mac,
+ NL80211_TDLS_TEARDOWN,
+ le16_to_cpu(tdls_evt->u.reason_code),
+ GFP_KERNEL);
+ ret = mwifiex_tdls_oper(priv, tdls_evt->peer_mac,
+ MWIFIEX_TDLS_DISABLE_LINK);
+ queue_work(adapter->workqueue, &adapter->main_work);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
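
The cast and length check above assume the following buffer layout (a sketch; the trailing two bytes are optional, hence the sizeof(u16) slack in the check):

	/* event_skb->data, as parsed above:
	 *   [0..3]   event cause (also available as adapter->event_cause)
	 *   [4..5]   type        (e.g. TDLS_EVENT_LINK_TEAR_DOWN)
	 *   [6..11]  peer_mac
	 *   [12..13] reason_code / reserved (may be absent)
	 */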
+
/*
* This function handles events generated by firmware.
*
@@ -459,6 +499,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
false);
break;
+ case EVENT_TDLS_GENERIC_EVENT:
+ ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
+ break;
+
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index ed26387eccf5..8b639d7fe6df 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -183,6 +183,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
struct rx_packet_hdr *rx_pkt_hdr;
u8 ta[ETH_ALEN];
u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
+ struct mwifiex_sta_node *sta_ptr;
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -213,14 +214,25 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
 * If the packet is not a unicast packet then send the packet
 * directly to the OS. Don't pass it through RX reordering.
 */
- if (!IS_11N_ENABLED(priv) ||
+ if ((!IS_11N_ENABLED(priv) &&
+ !(ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ !(local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET))) ||
!ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
mwifiex_process_rx_packet(priv, skb);
return ret;
}
- if (mwifiex_queuing_ra_based(priv)) {
+ if (mwifiex_queuing_ra_based(priv) ||
+ (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET)) {
memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
+ if (local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET &&
+ local_rx_pd->priority < MAX_NUM_TID) {
+ sta_ptr = mwifiex_get_sta_entry(priv, ta);
+ if (sta_ptr)
+ sta_ptr->rx_seq[local_rx_pd->priority] =
+ le16_to_cpu(local_rx_pd->seq_num);
+ }
} else {
if (rx_pkt_type != PKT_TYPE_BAR)
priv->rx_seq[local_rx_pd->priority] = seq_num;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 1236a5de7bca..5fce7e78a36e 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -128,6 +128,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct txpd *local_tx_pd;
+ struct mwifiex_tx_param tx_param;
/* sizeof(struct txpd) + Interface specific header */
#define NULL_PACKET_HDR 64
u32 data_len = NULL_PACKET_HDR;
@@ -168,8 +169,9 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
skb, NULL);
} else {
skb_push(skb, INTF_HEADER_LEN);
+ tx_param.next_pkt_len = 0;
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
- skb, NULL);
+ skb, &tx_param);
}
switch (ret) {
case -EBUSY:
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 97662a1ba58c..e73034fbbde9 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -25,8 +25,8 @@
#define TDLS_RESP_FIX_LEN 8
#define TDLS_CONFIRM_FIX_LEN 6
-static void
-mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
+ const u8 *mac, u8 status)
{
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *tid_list;
@@ -84,7 +84,8 @@ mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
return;
}
-static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
+ const u8 *mac)
{
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *ra_list_head;
@@ -185,8 +186,50 @@ static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
+ u8 vht_enabled, struct sk_buff *skb)
+{
+ struct ieee80211_ht_operation *ht_oper;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_bssdescriptor *bss_desc =
+ &priv->curr_bss_params.bss_descriptor;
+ u8 *pos;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (unlikely(!sta_ptr)) {
+ dev_warn(priv->adapter->dev,
+ "TDLS peer station not found in list\n");
+ return -1;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
+ *pos++ = WLAN_EID_HT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_ht_operation);
+ ht_oper = (void *)pos;
+
+ ht_oper->primary_chan = bss_desc->channel;
+
+ /* follow AP's channel bandwidth */
+ if (ISSUPP_CHANWIDTH40(priv->adapter->hw_dot_11n_dev_cap) &&
+ bss_desc->bcn_ht_cap &&
+ ISALLOWED_CHANWIDTH40(bss_desc->bcn_ht_oper->ht_param))
+ ht_oper->ht_param = bss_desc->bcn_ht_oper->ht_param;
+
+ if (vht_enabled) {
+ ht_oper->ht_param =
+ mwifiex_get_sec_chan_offset(bss_desc->channel);
+ ht_oper->ht_param |= BIT(2);
+ }
+
+ memcpy(&sta_ptr->tdls_cap.ht_oper, ht_oper,
+ sizeof(struct ieee80211_ht_operation));
+
+ return 0;
+}
+
static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
- u8 *mac, struct sk_buff *skb)
+ const u8 *mac, struct sk_buff *skb)
{
struct mwifiex_bssdescriptor *bss_desc;
struct ieee80211_vht_operation *vht_oper;
@@ -325,8 +368,9 @@ static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
}
static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, struct sk_buff *skb)
+ const u8 *peer, u8 action_code,
+ u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
{
struct ieee80211_tdls_data *tf;
int ret;
@@ -428,6 +472,17 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
dev_kfree_skb_any(skb);
return ret;
}
+ ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ } else {
+ ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
}
break;
@@ -453,7 +508,8 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
}
static void
-mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, const u8 *src_addr,
+ const u8 *peer, const u8 *bssid)
{
struct ieee80211_tdls_lnkie *lnkid;
@@ -467,8 +523,8 @@ mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
memcpy(lnkid->resp_sta, peer, ETH_ALEN);
}
-int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
+ u8 action_code, u8 dialog_token,
u16 status_code, const u8 *extra_ies,
size_t extra_ies_len)
{
@@ -560,7 +616,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
}
static int
-mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
+ const u8 *peer,
u8 action_code, u8 dialog_token,
u16 status_code, struct sk_buff *skb)
{
@@ -638,10 +695,10 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
return 0;
}
-int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
- u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, const u8 *extra_ies,
- size_t extra_ies_len)
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
{
struct sk_buff *skb;
struct mwifiex_txinfo *tx_info;
@@ -848,7 +905,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
}
static int
-mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
@@ -869,7 +926,7 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
}
static int
-mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
@@ -896,7 +953,7 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
}
static int
-mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
@@ -925,7 +982,7 @@ mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
}
static int
-mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
{
struct mwifiex_sta_node *sta_ptr;
struct ieee80211_mcs_info mcs;
@@ -982,7 +1039,7 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
return 0;
}
-int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
{
switch (action) {
case MWIFIEX_TDLS_ENABLE_LINK:
@@ -997,7 +1054,7 @@ int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
return 0;
}
-int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *sta_ptr;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 9be6544bdded..32643555dd2a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
switch (GET_RXSTBC(cap_info)) {
case MWIFIEX_RX_STBC1:
/* HT_CAP 1X1 mode */
- memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
break;
case MWIFIEX_RX_STBC12: /* fall through */
case MWIFIEX_RX_STBC123:
/* HT_CAP 2X2 mode */
- memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+ bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
default:
dev_warn(priv->adapter->dev,
"Unsupported RX-STBC, default to 2x2\n");
- memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+ bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+ bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
}
priv->ap_11n_enabled = 1;
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index edbe4aff00d8..a8ce8130cfae 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,9 +22,9 @@
#define USB_VERSION "1.0"
+static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
-static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
/* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
static void mwifiex_usb_disconnect(struct usb_interface *intf)
{
struct usb_card_rec *card = usb_get_intfdata(intf);
+ struct mwifiex_adapter *adapter;
- if (!card) {
- pr_err("%s: card is NULL\n", __func__);
+ if (!card || !card->adapter) {
+ pr_err("%s: card or card->adapter is NULL\n", __func__);
return;
}
- mwifiex_usb_free(card);
+ adapter = card->adapter;
+ if (!adapter->priv_num)
+ return;
- if (card->adapter) {
- struct mwifiex_adapter *adapter = card->adapter;
+ if (user_rmmod) {
+#ifdef CONFIG_PM
+ if (adapter->is_suspended)
+ mwifiex_usb_resume(intf);
+#endif
- if (!adapter->priv_num)
- return;
+ mwifiex_deauthenticate_all(adapter);
- dev_dbg(adapter->dev, "%s: removing card\n", __func__);
- mwifiex_remove_card(adapter, &add_remove_card_sem);
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
}
+ mwifiex_usb_free(card);
+
+ dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+ mwifiex_remove_card(adapter, &add_remove_card_sem);
+
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
kfree(card);
- usb_card = NULL;
return;
}
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
+ .soft_unbind = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
card->adapter = adapter;
adapter->dev = &card->udev->dev;
- usb_card = card;
switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
- if (usb_card && usb_card->adapter) {
- struct mwifiex_adapter *adapter = usb_card->adapter;
-
- /* In case driver is removed when asynchronous FW downloading is
- * in progress
- */
- wait_for_completion(&adapter->fw_load);
-
-#ifdef CONFIG_PM
- if (adapter->is_suspended)
- mwifiex_usb_resume(usb_card->intf);
-#endif
-
- mwifiex_deauthenticate_all(adapter);
-
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
- }
+	/* Set the flag as the user is removing this module */
+ user_rmmod = 1;
usb_deregister(&mwifiex_usb_driver);
}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index c3824e37f3f2..6da5abf52e61 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -259,7 +259,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
* NULL is returned if station entry is not found in associated STA list.
*/
struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
@@ -280,7 +280,7 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
* If received mac address is NULL, NULL is returned.
*/
struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
unsigned long flags;
@@ -332,7 +332,7 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
}
/* This function will delete a station entry from station list */
-void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
unsigned long flags;
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 0a7cc742aed7..d3671d009f6c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -92,7 +92,7 @@ mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
* The function also initializes the list with the provided RA.
*/
static struct mwifiex_ra_list_tbl *
-mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
+mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -139,8 +139,7 @@ static u8 mwifiex_get_random_ba_threshold(void)
* This function allocates and adds a RA list for all TIDs
* with the given RA.
*/
-void
-mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
int i;
struct mwifiex_ra_list_tbl *ra_list;
@@ -164,6 +163,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
if (!mwifiex_queuing_ra_based(priv)) {
if (mwifiex_get_tdls_link_status(priv, ra) ==
TDLS_SETUP_COMPLETE) {
+ ra_list->tdls_link = true;
ra_list->is_11n_enabled =
mwifiex_tdls_peer_11n_enabled(priv, ra);
} else {
@@ -426,15 +426,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->tos_to_tid_inv[i];
}
- priv->aggr_prio_tbl[6].amsdu
- = priv->aggr_prio_tbl[6].ampdu_ap
- = priv->aggr_prio_tbl[6].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
-
- priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
- = priv->aggr_prio_tbl[7].ampdu_user
- = BA_STREAM_NOT_ALLOWED;
-
mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);
@@ -575,7 +566,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
*/
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
- u8 *ra_addr)
+ const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -596,7 +587,8 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
* retrieved.
*/
struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+ const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -657,7 +649,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
dev_dbg(adapter->dev,
"TDLS setup packet for %pM. Don't block\n", ra);
- else
+ else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
tdls_status = mwifiex_get_tdls_link_status(priv, ra);
}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 83e42083ebff..eca56e371a57 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,7 +99,7 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct sk_buff *skb);
-void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
+void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra, int tid);
@@ -123,7 +123,8 @@ void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
const struct host_cmd_ds_command *resp);
struct mwifiex_ra_list_tbl *
-mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
+ const u8 *ra_addr);
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 49300d04efdf..e27e32851f1e 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -988,8 +988,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
* tsc must be NULL or up to 8 bytes
*/
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
- u8 *tsc, size_t tsc_len)
+ int set_tx, const u8 *key, const u8 *rsc,
+ size_t rsc_len, const u8 *tsc, size_t tsc_len)
{
struct {
__le16 idx;
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8f6831f4e328..466d1ede76f1 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -38,8 +38,8 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv);
int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
int __orinoco_hw_setup_enc(struct orinoco_private *priv);
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
- u8 *tsc, size_t tsc_len);
+ int set_tx, const u8 *key, const u8 *rsc,
+ size_t rsc_len, const u8 *tsc, size_t tsc_len);
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 3ac71339d040..c90939ced0e4 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1673,7 +1673,7 @@ static int ezusb_probe(struct usb_interface *interface,
firmware.code = fw_entry->data;
}
if (firmware.size && firmware.code) {
- if (ezusb_firmware_download(upriv, &firmware))
+ if (ezusb_firmware_download(upriv, &firmware) < 0)
goto error;
} else {
err("No firmware to download");
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index b7a867b50b94..6abdaf0aa052 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -52,9 +52,9 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
priv->keys[index].seq_len = seq_len;
if (key_len)
- memcpy(priv->keys[index].key, key, key_len);
+ memcpy((void *)priv->keys[index].key, key, key_len);
if (seq_len)
- memcpy(priv->keys[index].seq, seq, seq_len);
+ memcpy((void *)priv->keys[index].seq, seq, seq_len);
switch (alg) {
case ORINOCO_ALG_TKIP:
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index eede90b63f84..7be3a4839640 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
return total;
}
-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
+static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct p54_common *priv = dev->priv;
unsigned int total, i;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index cbf0a589d32a..8330fa33e50b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
ray_release(link);
local = netdev_priv(dev);
- del_timer(&local->timer);
+ del_timer_sync(&local->timer);
if (link->priv) {
unregister_netdev(dev);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 39d22a154341..d2a9a08210be 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -517,7 +517,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool unicast, bool multicast);
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo);
+ const u8 *mac, struct station_info *sinfo);
static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo);
@@ -2490,7 +2490,7 @@ static void rndis_fill_station_info(struct usbnet *usbdev,
}
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
+ const u8 *mac, struct station_info *sinfo)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 84164747ace0..54aaeb09debf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -656,6 +656,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
common->vif_info[ii].seq_start = seq_no;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ status = 0;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 1b28cda6ca88..2eefbf159bc0 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
{
if (status) {
rsi_hal_send_sta_notify_frame(common,
- NL80211_IFTYPE_STATION,
+ RSI_IFTYPE_STATION,
STA_CONNECTED,
bssid,
qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
rsi_send_auto_rate_request(common);
} else {
rsi_hal_send_sta_notify_frame(common,
- NL80211_IFTYPE_STATION,
+ RSI_IFTYPE_STATION,
STA_DISCONNECTED,
bssid,
qos_enable,
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index f2f70784d4ad..d3fbe33d2324 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -63,7 +63,7 @@ static inline int rsi_create_kthread(struct rsi_common *common,
u8 *name)
{
init_completion(&thread->completion);
- thread->task = kthread_run(func_ptr, common, name);
+ thread->task = kthread_run(func_ptr, common, "%s", name);
if (IS_ERR(thread->task))
return (int)PTR_ERR(thread->task);
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ac67c4ad63c2..225215a3b8bb 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -73,6 +73,7 @@
#define RX_BA_INDICATION 1
#define RSI_TBL_SZ 40
#define MAX_RETRIES 8
+#define RSI_IFTYPE_STATION 0
#define STD_RATE_MCS7 0x07
#define STD_RATE_MCS6 0x06
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 41d4a8167dc3..c17fcf272728 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
entry->skb->len + padding_len);
/*
- * Enable beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
/*
* Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
void rt2800_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
/*
- * Enabled beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
}
EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index e3b885d8f7db..010b76505243 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1448,7 +1448,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index a87ee9b6585a..212ac4842c16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -749,7 +749,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 10572452cc21..86c43d112a4b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -68,6 +68,12 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
}
}
+ /* If the port is powered down, we get a -EPROTO error, and this
+	 * leads to an endless loop. So just say that the device is gone.
+ */
+ if (status == -EPROTO)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+
rt2x00_err(rt2x00dev,
"Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
request, offset, status);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 24402984ee57..9048a9cbe52c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
static void rt61pci_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
HW_BEACON_OFFSET(entry->entry_idx), 0);
/*
- * Enable beaconing again.
+ * Restore global beaconing state.
*/
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
+ rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
}
/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a140170b1eb3..95724ff9c726 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
- u32 reg;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+ reg = orig_reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
/*
- * Enable beaconing again.
+ * Restore beaconing state.
*/
- rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
}
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
index 08b056db4a3b..21005bd8b43c 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -1,5 +1,5 @@
-rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
+rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
-obj-$(CONFIG_RTL8180) += rtl8180.o
+obj-$(CONFIG_RTL8180) += rtl818x_pci.o
ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 98d8256f0377..2c1c02bafa10 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -284,6 +284,8 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.band = dev->conf.chandef.chan->band;
rx_status.mactime = tsft;
rx_status.flag |= RX_FLAG_MACTIME_START;
+ if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -461,18 +463,23 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
RTL818X_TX_DESC_FLAG_NO_ENC;
rc_flags = info->control.rates[0].flags;
+
+ /* HW will perform RTS-CTS when only the RTS flag is set.
+ * HW will perform CTS-to-self when both RTS and CTS flags are set.
+ * The RTS rate and RTS duration are also used for CTS-to-self.
+ */
if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+ rts_duration = ieee80211_rts_duration(dev, priv->vif,
+ skb->len, info);
} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
+ tx_flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+ rts_duration = ieee80211_ctstoself_duration(dev, priv->vif,
+ skb->len, info);
}
- if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
- rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
- info);
-
if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
unsigned int remainder;
@@ -683,9 +690,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
- rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
- IMR_TBDER | IMR_THPDER |
- IMR_THPDER | IMR_THPDOK |
+ rtl818x_iowrite32(priv, &priv->map->IMR,
+ IMR_TBDER | IMR_TBDOK |
IMR_TVODER | IMR_TVODOK |
IMR_TVIDER | IMR_TVIDOK |
IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +917,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
reg32 &= 0x00ffff00;
reg32 |= 0xb8000054;
rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
- }
+ } else
+ /* stop unused queues (no DMA alloc) */
+ rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+ (1<<1) | (1<<2));
priv->rf->init(dev);
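The protection-flag comment above boils down to a small mapping, sketched here outside the driver. The flag values are illustrative, not the RTL818x descriptor bits:

#include <stdbool.h>
#include <stdint.h>

#define DESC_FLAG_RTS (1u << 23)	/* illustrative bit positions */
#define DESC_FLAG_CTS (1u << 18)

static uint32_t protection_flags(bool use_rts_cts, bool use_cts_to_self)
{
	if (use_rts_cts)
		return DESC_FLAG_RTS;			/* full RTS/CTS exchange */
	if (use_cts_to_self)
		return DESC_FLAG_RTS | DESC_FLAG_CTS;	/* CTS-to-self */
	return 0;					/* no protection */
}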
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 0ca17cda48fa..629ad8cfa17b 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -253,14 +253,21 @@ static void rtl8187_tx(struct ieee80211_hw *dev,
flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
if (ieee80211_has_morefrags(tx_hdr->frame_control))
flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
+
+ /* HW will perform RTS-CTS when only the RTS flag is set.
+ * HW will perform CTS-to-self when both RTS and CTS flags are set.
+ * The RTS rate and RTS duration are also used for CTS-to-self.
+ */
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= RTL818X_TX_DESC_FLAG_RTS;
flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
rts_dur = ieee80211_rts_duration(dev, priv->vif,
skb->len, info);
} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- flags |= RTL818X_TX_DESC_FLAG_CTS;
+ flags |= RTL818X_TX_DESC_FLAG_RTS | RTL818X_TX_DESC_FLAG_CTS;
flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
+ rts_dur = ieee80211_ctstoself_duration(dev, priv->vif,
+ skb->len, info);
}
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -381,6 +388,8 @@ static void rtl8187_rx_cb(struct urb *urb)
rx_status.freq = dev->conf.chandef.chan->center_freq;
rx_status.band = dev->conf.chandef.chan->band;
rx_status.flag |= RX_FLAG_MACTIME_START;
+ if (flags & RTL818X_RX_DESC_FLAG_SPLCP)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 45ea4e1c4abe..7abef95d278b 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -334,9 +334,9 @@ struct rtl818x_csr {
* I don't like to introduce a ton of "reserved"..
* They are for RTL8187SE
*/
-#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + addr)
-#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + (addr >> 1))
-#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + (addr >> 2))
+#define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr))
+#define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1))
+#define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2))
#define FEMR_SE REG_ADDR2(0x1D4)
#define ARFR REG_ADDR2(0x1E0)
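The REG_ADDR*() hunk above is a pure macro-hygiene fix: without parentheses around the argument, an expression argument such as base + 4 binds to the shift incorrectly, because >> has lower precedence than +. A self-contained demonstration of the failure mode:

#include <stdio.h>

#define SHIFT_BAD(addr)  (addr >> 1)
#define SHIFT_GOOD(addr) ((addr) >> 1)

int main(void)
{
	/* intended result for both: (0x100 + 4) >> 1 == 0x82 */
	printf("bad:  0x%x\n", SHIFT_BAD(0x100 + 4));	/* 0x100 + (4 >> 1) == 0x102 */
	printf("good: 0x%x\n", SHIFT_GOOD(0x100 + 4));	/* 0x82 */
	return 0;
}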
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 4ec424f26672..b1ed6d0796f6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
* before switch channel or power save, or tx buffer packet
* maybe send after offchannel or rf sleep, this may cause
* dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 94cd9df98381..b14cf5a10f44 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -2515,23 +2515,3 @@ void rtl88ee_suspend(struct ieee80211_hw *hw)
void rtl88ee_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config = 0x%08X, write_into_reg =%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
index b4460a41bd01..1850fde881b5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
@@ -61,8 +61,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
void rtl88ee_suspend(struct ieee80211_hw *hw);
void rtl88ee_resume(struct ieee80211_hw *hw);
-void rtl88ee_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
index 1b4101bf9974..842d69349a37 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
@@ -93,7 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
rtl8188ee_bt_reg_init(hw);
- rtlpci->msi_support = true;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
@@ -255,7 +255,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
.enable_hw_sec = rtl88ee_enable_hw_security_config,
.set_key = rtl88ee_set_key,
.init_sw_leds = rtl88ee_init_sw_leds,
- .allow_all_destaddr = rtl88ee_allow_all_destaddr,
.get_bbreg = rtl88e_phy_query_bb_reg,
.set_bbreg = rtl88e_phy_set_bb_reg,
.get_rfreg = rtl88e_phy_query_rf_reg,
@@ -267,6 +266,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .msi_support = false,
.debug = DBG_EMERG,
};
@@ -383,10 +383,12 @@ module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
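The new msi module parameter simply feeds rtlpci->msi_support at init time. As a hedged, kernel-context sketch (not the actual rtlwifi call sites), honouring such a knob typically means trying MSI only on request and silently staying on legacy INTx otherwise:

#include <linux/pci.h>
#include <linux/types.h>

/* Illustrative only: try MSI when the module parameter asks for it,
 * otherwise (or when enabling fails) keep the shared legacy line.
 */
static void setup_irq_mode(struct pci_dev *pdev, bool msi_support)
{
	if (msi_support && pci_enable_msi(pdev) == 0)
		return;		/* MSI mode active */
	/* nothing to do for the fallback: INTx is the default */
}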
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 55adf043aef7..cdecb0fd4d8e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -2423,24 +2423,3 @@ void rtl92ce_suspend(struct ieee80211_hw *hw)
void rtl92ce_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) {/* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- } else {/* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
- }
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
index 2d063b0c7760..5533070f266c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -76,7 +76,5 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw);
void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw);
void rtl92ce_suspend(struct ieee80211_hw *hw);
void rtl92ce_resume(struct ieee80211_hw *hw);
-void rtl92ce_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b790320d2030..12f21f4073e8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -229,7 +229,6 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
.enable_hw_sec = rtl92ce_enable_hw_security_config,
.set_key = rtl92ce_set_key,
.init_sw_leds = rtl92ce_init_sw_leds,
- .allow_all_destaddr = rtl92ce_allow_all_destaddr,
.get_bbreg = rtl92c_phy_query_bb_reg,
.set_bbreg = rtl92c_phy_set_bb_reg,
.set_rfreg = rtl92ce_phy_set_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 07cb06da6729..a903c2671b4d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -511,7 +511,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
pr_info("MAC auto ON okay!\n");
break;
}
- if (pollingCount++ > 100) {
+ if (pollingCount++ > 1000) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
"Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
return -ENODEV;
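The rtl8192cu hunk above only raises the poll bound from 100 to 1000 iterations; the shape of the loop is unchanged. The generic form of such a bounded poll (names illustrative):

#include <stdbool.h>

extern bool mac_power_is_on(void);	/* placeholder for the register poll */

static int wait_for_mac_power(int max_polls)
{
	int polls = 0;

	while (!mac_power_is_on()) {
		if (++polls > max_polls)	/* bound was 100, now 1000 */
			return -1;		/* give up: device not ready */
	}
	return 0;
}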
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c61311084d7e..361435f8608a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -395,9 +395,6 @@ static struct usb_driver rtl8192cu_driver = {
/* .resume = rtl_usb_resume, */
/* .reset_resume = rtl8192c_resume, */
#endif /* CONFIG_PM */
-#ifdef CONFIG_AUTOSUSPEND
- .supports_autosuspend = 1,
-#endif
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 9098558d916d..1c7101bcd790 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -2544,23 +2544,3 @@ void rtl92se_resume(struct ieee80211_hw *hw)
pci_write_config_dword(rtlpci->pdev, 0x40,
val & 0xffff00ff);
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index da48aa8cbe6f..4cacee10f31e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -74,7 +74,5 @@ void rtl92se_set_key(struct ieee80211_hw *hw,
u8 enc_algo, bool is_wepkey, bool clear_all);
void rtl92se_suspend(struct ieee80211_hw *hw);
void rtl92se_resume(struct ieee80211_hw *hw);
-void rtl92se_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 2e8e6f8d2d51..1bff2a0f7600 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -290,7 +290,6 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
.enable_hw_sec = rtl92se_enable_hw_security_config,
.set_key = rtl92se_set_key,
.init_sw_leds = rtl92se_init_sw_leds,
- .allow_all_destaddr = rtl92se_allow_all_destaddr,
.get_bbreg = rtl92s_phy_query_bb_reg,
.set_bbreg = rtl92s_phy_set_bb_reg,
.get_rfreg = rtl92s_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 48fee1be78c2..5b4a714f3c8c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -32,7 +32,6 @@
#include "dm.h"
#include "fw.h"
#include "../rtl8723com/fw_common.h"
-#include "../rtl8723com/fw_common.h"
#include "phy.h"
#include "reg.h"
#include "hal_btc.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 65c9e80e1f78..87f69166a7ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -2383,24 +2383,3 @@ void rtl8723ae_suspend(struct ieee80211_hw *hw)
void rtl8723ae_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
index 6fa24f79b1d7..d3bc39fb27a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
@@ -67,7 +67,5 @@ void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
void rtl8723ae_suspend(struct ieee80211_hw *hw);
void rtl8723ae_resume(struct ieee80211_hw *hw);
-void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 1087a3bd07fa..73cba1eec8cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -238,7 +238,6 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
.enable_hw_sec = rtl8723ae_enable_hw_security_config,
.set_key = rtl8723ae_set_key,
.init_sw_leds = rtl8723ae_init_sw_leds,
- .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
.get_bbreg = rtl8723_phy_query_bb_reg,
.set_bbreg = rtl8723_phy_set_bb_reg,
.get_rfreg = rtl8723ae_phy_query_rf_reg,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index 0fdf0909321f..3d555495b453 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -2501,23 +2501,3 @@ void rtl8723be_suspend(struct ieee80211_hw *hw)
void rtl8723be_resume(struct ieee80211_hw *hw)
{
}
-
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
- bool write_into_reg)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-
- if (allow_all_da) /* Set BIT0 */
- rtlpci->receive_config |= RCR_AAP;
- else /* Clear BIT0 */
- rtlpci->receive_config &= ~RCR_AAP;
-
- if (write_into_reg)
- rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
-
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config = 0x%08X, write_into_reg =%d\n",
- rtlpci->receive_config, write_into_reg);
-}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
index b7449a9b57e4..64c7551af6b7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
@@ -59,6 +59,4 @@ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
void rtl8723be_suspend(struct ieee80211_hw *hw);
void rtl8723be_resume(struct ieee80211_hw *hw);
-void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
- bool write_into_reg);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index b4577ebc4bb0..ff12bf41644b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
rtl8723be_bt_reg_init(hw);
- rtlpci->msi_support = true;
+ rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -253,6 +253,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .msi_support = false,
.debug = DBG_EMERG,
};
@@ -365,9 +366,11 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n");
MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n");
+MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
index e0a0d8c8fed5..969eaea5eddd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
@@ -33,7 +33,6 @@
#include "trx.h"
#include "led.h"
#include "dm.h"
-#include "phy.h"
static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
{
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 6965afdf572a..407a7936d364 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1960,8 +1960,6 @@ struct rtl_hal_ops {
u32 regaddr, u32 bitmask);
void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
u32 regaddr, u32 bitmask, u32 data);
- void (*allow_all_destaddr)(struct ieee80211_hw *hw,
- bool allow_all_da, bool write_into_reg);
void (*linked_set_reg) (struct ieee80211_hw *hw);
void (*chk_switch_dmdp) (struct ieee80211_hw *hw);
void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw);
@@ -2030,6 +2028,10 @@ struct rtl_mod_params {
/* default: 1 = using linked fw power save */
bool fwctrl_lps;
+
+ /* default: 0 = not using MSI interrupts mode */
+ /* submodules should set their own default value */
+ bool msi_support;
};
struct rtl_hal_usbint_cfg {
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index 5a4ec56c83d0..5695628757ee 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -2,7 +2,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/crc7.h>
#include "wl1251.h"
#include "reg.h"
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index bf1fa18b9786..ede31f048ef9 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -2,7 +2,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/crc7.h>
#include <linux/etherdevice.h>
#include "wl1251.h"
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index db0105313745..c98630394a1a 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -124,11 +124,12 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
return ret;
}
- if (wl->vif && vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
+ if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack, that beacons have been lost */
- ieee80211_beacon_loss(wl->vif);
+ if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_beacon_loss(wl->vif);
}
if (vector & REGAINED_BSS_EVENT_ID) {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 757e25784a8a..4e782f18ae34 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -550,6 +550,34 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
+static int wl1251_build_null_data(struct wl1251 *wl)
+{
+ struct sk_buff *skb = NULL;
+ int size;
+ void *ptr;
+ int ret = -ENOMEM;
+
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ size = sizeof(struct wl12xx_null_data_template);
+ ptr = NULL;
+ } else {
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+ size = skb->len;
+ ptr = skb->data;
+ }
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
+
+out:
+ dev_kfree_skb(skb);
+ if (ret)
+ wl1251_warning("cmd build null data failed: %d", ret);
+
+ return ret;
+}
+
static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
struct ieee80211_qos_hdr template;
@@ -687,16 +715,6 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->power_level = conf->power_level;
}
- /*
- * Tell stack that connection is lost because hw encryption isn't
- * supported in monitor mode.
- * This requires temporary enabling of the hw connection monitor flag
- */
- if ((changed & IEEE80211_CONF_CHANGE_MONITOR) && wl->vif) {
- wl->hw->flags |= IEEE80211_HW_CONNECTION_MONITOR;
- ieee80211_connection_loss(wl->vif);
- }
-
out_sleep:
wl1251_ps_elp_sleep(wl);
@@ -1103,24 +1121,19 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
wl->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if (changed & BSS_CHANGED_BSSID) {
+ if ((changed & BSS_CHANGED_BSSID) &&
+ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
- if (!skb)
- goto out_sleep;
-
- ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
- skb->data, skb->len);
- dev_kfree_skb(skb);
- if (ret < 0)
- goto out_sleep;
+ if (!is_zero_ether_addr(wl->bssid)) {
+ ret = wl1251_build_null_data(wl);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_build_qos_null_data(wl);
- if (ret < 0)
- goto out;
+ ret = wl1251_build_qos_null_data(wl);
+ if (ret < 0)
+ goto out_sleep;
- if (wl->bss_type != BSS_TYPE_IBSS) {
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0)
@@ -1129,9 +1142,6 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- /* Disable temporary enabled hw connection monitor flag */
- wl->hw->flags &= ~IEEE80211_HW_CONNECTION_MONITOR;
-
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
@@ -1216,8 +1226,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
- wl->channel, wl->dtim_period);
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
if (ret < 0)
goto out_sleep;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index b06d36d99362..a0aa8fa72392 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -23,6 +23,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
@@ -83,47 +84,44 @@ static void wl1251_spi_reset(struct wl1251 *wl)
static void wl1251_spi_wake(struct wl1251 *wl)
{
- u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
+ u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
- cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
wl1251_error("could not allocate cmd for spi init");
return;
}
- memset(crc, 0, sizeof(crc));
memset(&t, 0, sizeof(t));
spi_message_init(&m);
/* Set WSPI_INIT_COMMAND
* the data is being sent from the MSB to LSB
*/
- cmd[2] = 0xff;
- cmd[3] = 0xff;
- cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
- cmd[0] = 0;
- cmd[7] = 0;
- cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
- cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+ cmd[0] = 0xff;
+ cmd[1] = 0xff;
+ cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+ cmd[3] = 0;
+ cmd[4] = 0;
+ cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+ cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+ cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
- cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+ cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
else
- cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
- cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
- | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
- crc[0] = cmd[1];
- crc[1] = cmd[0];
- crc[2] = cmd[7];
- crc[3] = cmd[6];
- crc[4] = cmd[5];
+ cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
- cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
- cmd[4] |= WSPI_INIT_CMD_END;
+ cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+ /*
+ * The above is the logical order; it must actually be stored
+ * in the buffer byte-swapped.
+ */
+ __swab32s((u32 *)cmd);
+ __swab32s((u32 *)cmd+1);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
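The byte-swap comment above (and its twin in wlcore/spi.c below) is worth unpacking: the command is assembled MSB-first in cmd[0..7], then each 32-bit word is byte-swapped in place before being handed to the SPI layer. A userspace illustration of the same transform, with made-up byte values; __builtin_bswap32() plays the role of the kernel's __swab32s():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* logical (MSB-first) order; values are illustrative only */
	uint8_t cmd[8] = { 0xff, 0xff, 0x5a, 0x00, 0x00, 0x30, 0x43, 0xc1 };
	uint32_t w;

	for (int i = 0; i < 8; i += 4) {
		memcpy(&w, cmd + i, sizeof(w));
		w = __builtin_bswap32(w);	/* same effect as __swab32s() */
		memcpy(cmd + i, &w, sizeof(w));
	}

	for (int i = 0; i < 8; i++)
		printf("%02x ", cmd[i]);	/* bytes now swapped per 32-bit word */
	printf("\n");
	return 0;
}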
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index f7381dd69009..0f2cfb0d2a9e 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -57,7 +57,7 @@ static const struct file_operations name## _ops = { \
wl, &name## _ops); \
if (!entry || IS_ERR(entry)) \
goto err; \
- } while (0);
+ } while (0)
#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
@@ -66,7 +66,7 @@ static const struct file_operations name## _ops = { \
wl, &prefix## _## name## _ops); \
if (!entry || IS_ERR(entry)) \
goto err; \
- } while (0);
+ } while (0)
#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
static ssize_t sub## _ ##name## _read(struct file *file, \
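The debugfs.h change drops a stray semicolon after while (0). That matters because do { ... } while (0); expands to two statements, which breaks the macro when it is used in an if/else. A self-contained demonstration:

#include <stdio.h>

#define LOG_BAD(msg)  do { puts(msg); } while (0);
#define LOG_GOOD(msg) do { puts(msg); } while (0)

int main(void)
{
	int ok = 1;

	/* With LOG_BAD(...) here the program would not compile: the
	 * macro's trailing ';' terminates the if-statement, leaving
	 * the 'else' below without a matching 'if'.
	 */
	if (ok)
		LOG_GOOD("ok");
	else
		LOG_GOOD("not ok");
	return 0;
}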
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e71eae353368..3d6028e62750 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1416,7 +1416,7 @@ void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
u16 offset, u8 flags,
- u8 *pattern, u8 len)
+ const u8 *pattern, u8 len)
{
struct wl12xx_rx_filter_field *field;
@@ -5184,7 +5184,8 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 29ef2492951f..d3dd7bfdf3f1 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct wlcore_platdev_data *pdev_data;
+ struct wlcore_platdev_data pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[1];
mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
if (func->num != 0x02)
return -ENODEV;
- pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
- if (!pdev_data)
- goto out;
-
- pdev_data->if_ops = &sdio_ops;
+ memset(&pdev_data, 0x00, sizeof(pdev_data));
+ pdev_data.if_ops = &sdio_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&func->dev, "can't allocate glue\n");
- goto out_free_pdev_data;
+ goto out;
}
glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- pdev_data->pdata = wl12xx_get_platform_data();
- if (IS_ERR(pdev_data->pdata)) {
- ret = PTR_ERR(pdev_data->pdata);
+ pdev_data.pdata = wl12xx_get_platform_data();
+ if (IS_ERR(pdev_data.pdata)) {
+ ret = PTR_ERR(pdev_data.pdata);
dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
goto out_free_glue;
}
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
- pdev_data->pdata->pwr_in_suspend = true;
+ pdev_data.pdata->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
memset(res, 0x00, sizeof(res));
- res[0].start = pdev_data->pdata->irq;
+ res[0].start = pdev_data.pdata->irq;
res[0].flags = IORESOURCE_IRQ;
res[0].name = "irq";
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdev_data,
- sizeof(*pdev_data));
+ ret = platform_device_add_data(glue->core, &pdev_data,
+ sizeof(pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
out_free_glue:
kfree(glue);
-out_free_pdev_data:
- kfree(pdev_data);
-
out:
return ret;
}
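The sdio probe conversion above (mirrored for spi below) relies on the fact that platform_device_add_data() duplicates the buffer it is given, so an on-stack struct is sufficient and the separate kfree() error path disappears. A kernel-context sketch of the resulting pattern; struct my_pdev_data and my_add_pdev_data are hypothetical:

#include <linux/platform_device.h>
#include <linux/string.h>

struct my_pdev_data {			/* hypothetical payload */
	const void *if_ops;
};

static int my_add_pdev_data(struct platform_device *pdev, const void *if_ops)
{
	struct my_pdev_data pdev_data;	/* stack lifetime is enough */

	memset(&pdev_data, 0, sizeof(pdev_data));
	pdev_data.if_ops = if_ops;

	/* the core kmemdup()s the buffer, so &pdev_data may go out of
	 * scope as soon as this returns
	 */
	return platform_device_add_data(pdev, &pdev_data, sizeof(pdev_data));
}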
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index dbe826dd7c23..392c882b28f0 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -24,11 +24,12 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
@@ -110,18 +111,16 @@ static void wl12xx_spi_reset(struct device *child)
static void wl12xx_spi_init(struct device *child)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
+ u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
- cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
dev_err(child->parent,
"could not allocate cmd for spi init\n");
return;
}
- memset(crc, 0, sizeof(crc));
memset(&t, 0, sizeof(t));
spi_message_init(&m);
@@ -129,30 +128,29 @@ static void wl12xx_spi_init(struct device *child)
* Set WSPI_INIT_COMMAND
* the data is being sent from the MSB to LSB
*/
- cmd[2] = 0xff;
- cmd[3] = 0xff;
- cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
- cmd[0] = 0;
- cmd[7] = 0;
- cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
- cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+ cmd[0] = 0xff;
+ cmd[1] = 0xff;
+ cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
+ cmd[3] = 0;
+ cmd[4] = 0;
+ cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
+ cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
+
+ cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
+ | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
- cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
+ cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
else
- cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
-
- cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
- | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
-
- crc[0] = cmd[1];
- crc[1] = cmd[0];
- crc[2] = cmd[7];
- crc[3] = cmd[6];
- crc[4] = cmd[5];
+ cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
- cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
- cmd[4] |= WSPI_INIT_CMD_END;
+ cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+ /*
+ * The above is the logical order; it must actually be stored
+ * in the buffer byte-swapped.
+ */
+ __swab32s((u32 *)cmd);
+ __swab32s((u32 *)cmd+1);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
@@ -327,27 +325,25 @@ static struct wl1271_if_operations spi_ops = {
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
- struct wlcore_platdev_data *pdev_data;
+ struct wlcore_platdev_data pdev_data;
struct resource res[1];
int ret = -ENOMEM;
- pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
- if (!pdev_data)
- goto out;
+ memset(&pdev_data, 0x00, sizeof(pdev_data));
- pdev_data->pdata = dev_get_platdata(&spi->dev);
- if (!pdev_data->pdata) {
+ pdev_data.pdata = dev_get_platdata(&spi->dev);
+ if (!pdev_data.pdata) {
dev_err(&spi->dev, "no platform data\n");
ret = -ENODEV;
- goto out_free_pdev_data;
+ goto out;
}
- pdev_data->if_ops = &spi_ops;
+ pdev_data.if_ops = &spi_ops;
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&spi->dev, "can't allocate glue\n");
- goto out_free_pdev_data;
+ goto out;
}
glue->dev = &spi->dev;
@@ -385,8 +381,8 @@ static int wl1271_probe(struct spi_device *spi)
goto out_dev_put;
}
- ret = platform_device_add_data(glue->core, pdev_data,
- sizeof(*pdev_data));
+ ret = platform_device_add_data(glue->core, &pdev_data,
+ sizeof(pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
@@ -406,9 +402,6 @@ out_dev_put:
out_free_glue:
kfree(glue);
-out_free_pdev_data:
- kfree(pdev_data);
-
out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 756e890bc5ee..c2c34a84ff3d 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -512,8 +512,8 @@ int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
- u16 offset, u8 flags,
- u8 *pattern, u8 len);
+ u16 offset, u8 flags,
+ const u8 *pattern, u8 len);
void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0d4a285cbd7e..4dd7c4a1923b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -99,22 +99,43 @@ struct xenvif_rx_meta {
*/
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-struct xenvif {
- /* Unique identifier for this interface. */
- domid_t domid;
- unsigned int handle;
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)
- /* Is this interface disabled? True when backend discovers
- * frontend is rogue.
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
+struct xenvif;
+
+struct xenvif_stats {
+ /* Stats fields to be updated per-queue.
+ * A subset of struct net_device_stats that contains only the
+ * fields that are updated in netback.c for each queue.
*/
- bool disabled;
+ unsigned int rx_bytes;
+ unsigned int rx_packets;
+ unsigned int tx_bytes;
+ unsigned int tx_packets;
+
+ /* Additional stats used by xenvif */
+ unsigned long rx_gso_checksum_fixup;
+ unsigned long tx_zerocopy_sent;
+ unsigned long tx_zerocopy_success;
+ unsigned long tx_zerocopy_fail;
+ unsigned long tx_frag_overflow;
+};
+
+struct xenvif_queue { /* Per-queue data for xenvif */
+ unsigned int id; /* Queue ID, 0-based */
+ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+ struct xenvif *vif; /* Parent VIF */
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int tx_irq;
/* Only used when feature-split-event-channels = 1 */
- char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
struct xen_netif_tx_back_ring tx;
struct sk_buff_head tx_queue;
struct page *mmap_pages[MAX_PENDING_REQS];
@@ -150,7 +171,7 @@ struct xenvif {
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
unsigned int rx_irq;
/* Only used when feature-split-event-channels = 1 */
- char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
RING_IDX rx_last_skb_slots;
@@ -158,14 +179,29 @@ struct xenvif {
struct timer_list wake_queue;
- /* This array is allocated seperately as it is large */
- struct gnttab_copy *grant_copy_op;
+ struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
/* We create one meta structure per ring request we consume, so
* the maximum number is the same as the ring size.
*/
struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+ u64 credit_window_start;
+
+ /* Statistics */
+ struct xenvif_stats stats;
+};
+
+struct xenvif {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+
u8 fe_dev_addr[6];
/* Frontend feature information. */
@@ -179,19 +215,13 @@ struct xenvif {
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
- /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
- unsigned long credit_bytes;
- unsigned long credit_usec;
- unsigned long remaining_credit;
- struct timer_list credit_timeout;
- u64 credit_window_start;
+ /* Is this interface disabled? True when backend discovers
+ * frontend is rogue.
+ */
+ bool disabled;
- /* Statistics */
- unsigned long rx_gso_checksum_fixup;
- unsigned long tx_zerocopy_sent;
- unsigned long tx_zerocopy_success;
- unsigned long tx_zerocopy_fail;
- unsigned long tx_frag_overflow;
+ /* Queues */
+ struct xenvif_queue *queues;
/* Miscellaneous private stuff. */
struct net_device *dev;
@@ -206,7 +236,10 @@ struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
unsigned int handle);
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue);
+void xenvif_deinit_queue(struct xenvif_queue *queue);
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
@@ -217,44 +250,47 @@ void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xenvif_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif_queue *queue);
+
+int xenvif_queue_stopped(struct xenvif_queue *queue);
+void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif *vif);
-int xenvif_map_frontend_rings(struct xenvif *vif,
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
-int xenvif_tx_action(struct xenvif *vif, int budget);
+int xenvif_tx_action(struct xenvif_queue *queue, int budget);
int xenvif_kthread_guest_rx(void *data);
-void xenvif_kick_thread(struct xenvif *vif);
+void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
/* Determine whether the needed number of slots (req) are available,
* and set req_event if not.
*/
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-void xenvif_stop_queue(struct xenvif *vif);
+void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -
- vif->pending_prod + vif->pending_cons;
+ queue->pending_prod + queue->pending_cons;
}
/* Callback from stack when TX packet can be released */
@@ -264,5 +300,6 @@ extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_drain_timeout_jiffies;
+extern unsigned int xenvif_max_queues;
#endif /* __XEN_NETBACK__COMMON_H__ */
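The sizing of the new name buffers follows directly from the naming scheme in the comments above: "-qNNN" adds up to five characters to the interface name, and "-tx"/"-rx" add three more (the NUL is already counted in IFNAMSIZ). A small userspace check of that arithmetic, with IFNAMSIZ mirroring the kernel constant:

#include <stdio.h>

#define IFNAMSIZ	16			/* mirrors the kernel value */
#define QUEUE_NAME_SIZE	(IFNAMSIZ + 5)		/* "-q" plus up to 3 digits */
#define IRQ_NAME_SIZE	(QUEUE_NAME_SIZE + 3)	/* "-tx" or "-rx" */

int main(void)
{
	char queue_name[QUEUE_NAME_SIZE];
	char irq_name[IRQ_NAME_SIZE];

	snprintf(queue_name, sizeof(queue_name), "%s-q%u", "vif1.0", 2u);
	snprintf(irq_name, sizeof(irq_name), "%s-tx", queue_name);
	printf("%s / %s\n", queue_name, irq_name);	/* vif1.0-q2 / vif1.0-q2-tx */
	return 0;
}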
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 20e9defa1060..852da34b8961 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,16 @@
#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64
+static inline void xenvif_stop_queue(struct xenvif_queue *queue)
+{
+ struct net_device *dev = queue->vif->dev;
+
+ if (!queue->vif->can_queue)
+ return;
+
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+}
+
int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
@@ -50,33 +60,34 @@ int xenvif_schedulable(struct xenvif *vif)
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
- struct xenvif *vif = dev_id;
+ struct xenvif_queue *queue = dev_id;
- if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
- napi_schedule(&vif->napi);
+ if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
+ napi_schedule(&queue->napi);
return IRQ_HANDLED;
}
-static int xenvif_poll(struct napi_struct *napi, int budget)
+int xenvif_poll(struct napi_struct *napi, int budget)
{
- struct xenvif *vif = container_of(napi, struct xenvif, napi);
+ struct xenvif_queue *queue =
+ container_of(napi, struct xenvif_queue, napi);
int work_done;
/* This vif is rogue, we pretend there is nothing to do
* for this vif to deschedule it from NAPI. But this interface
* will be turned off in thread context later.
*/
- if (unlikely(vif->disabled)) {
+ if (unlikely(queue->vif->disabled)) {
napi_complete(napi);
return 0;
}
- work_done = xenvif_tx_action(vif, budget);
+ work_done = xenvif_tx_action(queue, budget);
if (work_done < budget) {
napi_complete(napi);
- xenvif_napi_schedule_or_enable_events(vif);
+ xenvif_napi_schedule_or_enable_events(queue);
}
return work_done;
@@ -84,9 +95,9 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
- struct xenvif *vif = dev_id;
+ struct xenvif_queue *queue = dev_id;
- xenvif_kick_thread(vif);
+ xenvif_kick_thread(queue);
return IRQ_HANDLED;
}
@@ -99,28 +110,80 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void xenvif_wake_queue(unsigned long data)
+int xenvif_queue_stopped(struct xenvif_queue *queue)
{
- struct xenvif *vif = (struct xenvif *)data;
+ struct net_device *dev = queue->vif->dev;
+ unsigned int id = queue->id;
+ return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
+}
- if (netif_queue_stopped(vif->dev)) {
- netdev_err(vif->dev, "draining TX queue\n");
- vif->rx_queue_purge = true;
- xenvif_kick_thread(vif);
- netif_wake_queue(vif->dev);
+void xenvif_wake_queue(struct xenvif_queue *queue)
+{
+ struct net_device *dev = queue->vif->dev;
+ unsigned int id = queue->id;
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
+}
+
+/* Callback to wake the queue and drain it on timeout */
+static void xenvif_wake_queue_callback(unsigned long data)
+{
+ struct xenvif_queue *queue = (struct xenvif_queue *)data;
+
+ if (xenvif_queue_stopped(queue)) {
+ netdev_err(queue->vif->dev, "draining TX queue\n");
+ queue->rx_queue_purge = true;
+ xenvif_kick_thread(queue);
+ xenvif_wake_queue(queue);
}
}
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ unsigned int num_queues = dev->real_num_tx_queues;
+ u32 hash;
+ u16 queue_index;
+
+ /* First, check if there is only one queue to optimise the
+ * single-queue or old frontend scenario.
+ */
+ if (num_queues == 1) {
+ queue_index = 0;
+ } else {
+ /* Use skb_get_hash to obtain an L4 hash if available */
+ hash = skb_get_hash(skb);
+ queue_index = hash % num_queues;
+ }
+
+ return queue_index;
+}
+
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = dev->real_num_tx_queues;
+ u16 index;
int min_slots_needed;
BUG_ON(skb->dev != dev);
- /* Drop the packet if vif is not ready */
- if (vif->task == NULL ||
- vif->dealloc_task == NULL ||
+ /* Drop the packet if queues are not set up */
+ if (num_queues < 1)
+ goto drop;
+
+ /* Obtain the queue to be used to transmit this packet */
+ index = skb_get_queue_mapping(skb);
+ if (index >= num_queues) {
+ pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
+ index, vif->dev->name);
+ index %= num_queues;
+ }
+ queue = &vif->queues[index];
+
+ /* Drop the packet if queue is not ready */
+ if (queue->task == NULL ||
+ queue->dealloc_task == NULL ||
!xenvif_schedulable(vif))
goto drop;
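xenvif_select_queue() above implements a common multiqueue policy: short-circuit for a single queue, otherwise reduce an L4 flow hash modulo the queue count so every flow consistently maps to one queue. The policy in isolation:

#include <stdint.h>

static uint16_t select_queue(uint32_t flow_hash, unsigned int num_queues)
{
	if (num_queues == 1)
		return 0;			/* single queue / legacy frontend */
	return flow_hash % num_queues;		/* per-flow spreading */
}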
@@ -139,16 +202,16 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
* then turn off the queue to give the ring a chance to
* drain.
*/
- if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
- vif->wake_queue.function = xenvif_wake_queue;
- vif->wake_queue.data = (unsigned long)vif;
- xenvif_stop_queue(vif);
- mod_timer(&vif->wake_queue,
+ if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
+ queue->wake_queue.function = xenvif_wake_queue_callback;
+ queue->wake_queue.data = (unsigned long)queue;
+ xenvif_stop_queue(queue);
+ mod_timer(&queue->wake_queue,
jiffies + rx_drain_timeout_jiffies);
}
- skb_queue_tail(&vif->rx_queue, skb);
- xenvif_kick_thread(vif);
+ skb_queue_tail(&queue->rx_queue, skb);
+ xenvif_kick_thread(queue);
return NETDEV_TX_OK;
@@ -161,25 +224,65 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned long rx_bytes = 0;
+ unsigned long rx_packets = 0;
+ unsigned long tx_bytes = 0;
+ unsigned long tx_packets = 0;
+ unsigned int index;
+
+ if (vif->queues == NULL)
+ goto out;
+
+ /* Aggregate tx and rx stats from each queue */
+ for (index = 0; index < num_queues; ++index) {
+ queue = &vif->queues[index];
+ rx_bytes += queue->stats.rx_bytes;
+ rx_packets += queue->stats.rx_packets;
+ tx_bytes += queue->stats.tx_bytes;
+ tx_packets += queue->stats.tx_packets;
+ }
+
+out:
+ vif->dev->stats.rx_bytes = rx_bytes;
+ vif->dev->stats.rx_packets = rx_packets;
+ vif->dev->stats.tx_bytes = tx_bytes;
+ vif->dev->stats.tx_packets = tx_packets;
+
return &vif->dev->stats;
}
static void xenvif_up(struct xenvif *vif)
{
- napi_enable(&vif->napi);
- enable_irq(vif->tx_irq);
- if (vif->tx_irq != vif->rx_irq)
- enable_irq(vif->rx_irq);
- xenvif_napi_schedule_or_enable_events(vif);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->dev->real_num_tx_queues;
+ unsigned int queue_index;
+
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
+ napi_enable(&queue->napi);
+ enable_irq(queue->tx_irq);
+ if (queue->tx_irq != queue->rx_irq)
+ enable_irq(queue->rx_irq);
+ xenvif_napi_schedule_or_enable_events(queue);
+ }
}
static void xenvif_down(struct xenvif *vif)
{
- napi_disable(&vif->napi);
- disable_irq(vif->tx_irq);
- if (vif->tx_irq != vif->rx_irq)
- disable_irq(vif->rx_irq);
- del_timer_sync(&vif->credit_timeout);
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->dev->real_num_tx_queues;
+ unsigned int queue_index;
+
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
+ napi_disable(&queue->napi);
+ disable_irq(queue->tx_irq);
+ if (queue->tx_irq != queue->rx_irq)
+ disable_irq(queue->rx_irq);
+ del_timer_sync(&queue->credit_timeout);
+ }
}
static int xenvif_open(struct net_device *dev)
@@ -187,7 +290,7 @@ static int xenvif_open(struct net_device *dev)
struct xenvif *vif = netdev_priv(dev);
if (netif_carrier_ok(dev))
xenvif_up(vif);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -196,7 +299,7 @@ static int xenvif_close(struct net_device *dev)
struct xenvif *vif = netdev_priv(dev);
if (netif_carrier_ok(dev))
xenvif_down(vif);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
return 0;
}
@@ -236,29 +339,29 @@ static const struct xenvif_stat {
} xenvif_stats[] = {
{
"rx_gso_checksum_fixup",
- offsetof(struct xenvif, rx_gso_checksum_fixup)
+ offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
},
/* If (sent != success + fail), there are probably packets never
* freed up properly!
*/
{
"tx_zerocopy_sent",
- offsetof(struct xenvif, tx_zerocopy_sent),
+ offsetof(struct xenvif_stats, tx_zerocopy_sent),
},
{
"tx_zerocopy_success",
- offsetof(struct xenvif, tx_zerocopy_success),
+ offsetof(struct xenvif_stats, tx_zerocopy_success),
},
{
"tx_zerocopy_fail",
- offsetof(struct xenvif, tx_zerocopy_fail)
+ offsetof(struct xenvif_stats, tx_zerocopy_fail)
},
/* Number of packets exceeding MAX_SKB_FRAG slots. You should use
* a guest with the same MAX_SKB_FRAG
*/
{
"tx_frag_overflow",
- offsetof(struct xenvif, tx_frag_overflow)
+ offsetof(struct xenvif_stats, tx_frag_overflow)
},
};
@@ -275,11 +378,20 @@ static int xenvif_get_sset_count(struct net_device *dev, int string_set)
static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
- void *vif = netdev_priv(dev);
+ struct xenvif *vif = netdev_priv(dev);
+ unsigned int num_queues = dev->real_num_tx_queues;
int i;
-
- for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
- data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+ unsigned int queue_index;
+ struct xenvif_stats *vif_stats;
+
+ for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
+ unsigned long accum = 0;
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ vif_stats = &vif->queues[queue_index].stats;
+ accum += *(unsigned long *)((void *)vif_stats + xenvif_stats[i].offset);
+ }
+ data[i] = accum;
+ }
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
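The reworked ethtool loop is an instance of the offsetof-table pattern: each stat entry records the field's byte offset inside struct xenvif_stats, and the read path sums that field across all queues via byte-granular pointer arithmetic (hence the void-pointer access above). A standalone miniature of the same pattern:

#include <stddef.h>
#include <stdio.h>

struct q_stats { unsigned long tx_zerocopy_sent, tx_frag_overflow; };

static const struct { const char *name; size_t off; } stat_tbl[] = {
	{ "tx_zerocopy_sent", offsetof(struct q_stats, tx_zerocopy_sent) },
	{ "tx_frag_overflow", offsetof(struct q_stats, tx_frag_overflow) },
};

int main(void)
{
	struct q_stats queues[2] = { { 3, 1 }, { 4, 0 } };

	for (size_t i = 0; i < 2; i++) {
		unsigned long accum = 0;

		for (size_t q = 0; q < 2; q++) {
			char *base = (char *)&queues[q];	/* byte-granular arithmetic */
			accum += *(unsigned long *)(base + stat_tbl[i].off);
		}
		printf("%s = %lu\n", stat_tbl[i].name, accum);
	}
	return 0;
}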
@@ -312,6 +424,7 @@ static const struct net_device_ops xenvif_netdev_ops = {
.ndo_fix_features = xenvif_fix_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_select_queue = xenvif_select_queue,
};
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -321,10 +434,14 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
- int i;
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
- dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+ /* Allocate a netdev with the max. supported number of queues.
+ * When the guest selects the desired number, it will be updated
+ * via netif_set_real_num_tx_queues().
+ */
+ dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
+ xenvif_max_queues);
if (dev == NULL) {
pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
@@ -334,66 +451,28 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif = netdev_priv(dev);
- vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
- MAX_GRANT_COPY_OPS);
- if (vif->grant_copy_op == NULL) {
- pr_warn("Could not allocate grant copy space for %s\n", name);
- free_netdev(dev);
- return ERR_PTR(-ENOMEM);
- }
-
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
vif->ip_csum = 1;
vif->dev = dev;
-
vif->disabled = false;
- vif->credit_bytes = vif->remaining_credit = ~0UL;
- vif->credit_usec = 0UL;
- init_timer(&vif->credit_timeout);
- vif->credit_window_start = get_jiffies_64();
-
- init_timer(&vif->wake_queue);
+ /* Start out with no queues. The call below does not require
+ * rtnl_lock() as it happens before register_netdev().
+ */
+ vif->queues = NULL;
+ netif_set_real_num_tx_queues(dev, 0);
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
dev->features = dev->hw_features | NETIF_F_RXCSUM;
- SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+ dev->ethtool_ops = &xenvif_ethtool_ops;
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
- skb_queue_head_init(&vif->rx_queue);
- skb_queue_head_init(&vif->tx_queue);
-
- vif->pending_cons = 0;
- vif->pending_prod = MAX_PENDING_REQS;
- for (i = 0; i < MAX_PENDING_REQS; i++)
- vif->pending_ring[i] = i;
- spin_lock_init(&vif->callback_lock);
- spin_lock_init(&vif->response_lock);
- /* If ballooning is disabled, this will consume real memory, so you
- * better enable it. The long term solution would be to use just a
- * bunch of valid page descriptors, without dependency on ballooning
- */
- err = alloc_xenballooned_pages(MAX_PENDING_REQS,
- vif->mmap_pages,
- false);
- if (err) {
- netdev_err(dev, "Could not reserve mmap_pages\n");
- return ERR_PTR(-ENOMEM);
- }
- for (i = 0; i < MAX_PENDING_REQS; i++) {
- vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
- { .callback = xenvif_zerocopy_callback,
- .ctx = NULL,
- .desc = i };
- vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
- }
-
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
@@ -403,8 +482,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
- netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
-
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -421,98 +498,147 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
return vif;
}
-int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+int xenvif_init_queue(struct xenvif_queue *queue)
+{
+ int err, i;
+
+ queue->credit_bytes = queue->remaining_credit = ~0UL;
+ queue->credit_usec = 0UL;
+ init_timer(&queue->credit_timeout);
+ queue->credit_window_start = get_jiffies_64();
+
+ skb_queue_head_init(&queue->rx_queue);
+ skb_queue_head_init(&queue->tx_queue);
+
+ queue->pending_cons = 0;
+ queue->pending_prod = MAX_PENDING_REQS;
+ for (i = 0; i < MAX_PENDING_REQS; ++i)
+ queue->pending_ring[i] = i;
+
+ spin_lock_init(&queue->callback_lock);
+ spin_lock_init(&queue->response_lock);
+
+ /* If ballooning is disabled, this will consume real memory, so you
+ * had better enable it. The long-term solution would be to use just a
+ * bunch of valid page descriptors, without depending on ballooning
+ */
+ err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+ queue->mmap_pages,
+ false);
+ if (err) {
+ netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+ queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
+ { .callback = xenvif_zerocopy_callback,
+ .ctx = NULL,
+ .desc = i };
+ queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+ }
+
+ init_timer(&queue->wake_queue);
+
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+ XENVIF_NAPI_WEIGHT);
+
+ return 0;
+}
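[Editor's note: xenvif_init_queue() initialises a single queue; allocating the queues[] array itself happens on the xenbus connect path, which is not part of this hunk. A rough, hypothetical sketch of what such a caller might look like — example_create_queues(), the queue->id field, and the name format are assumptions for illustration, not the patch's code:]

    /* Hypothetical caller: allocate and initialise n queues once the
     * frontend has negotiated how many it wants.
     */
    static int example_create_queues(struct xenvif *vif, unsigned int n)
    {
            unsigned int i;
            int err;

            vif->queues = vzalloc(n * sizeof(struct xenvif_queue));
            if (!vif->queues)
                    return -ENOMEM;

            for (i = 0; i < n; i++) {
                    struct xenvif_queue *queue = &vif->queues[i];

                    queue->vif = vif;
                    queue->id = i;  /* field name assumed */
                    snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                             vif->dev->name, i);
                    err = xenvif_init_queue(queue);
                    if (err)
                            return err; /* caller unwinds via xenvif_deinit_queue() */
            }

            /* register_netdev() has already run here, so unlike the
             * alloc-time call this one needs rtnl_lock().
             */
            rtnl_lock();
            netif_set_real_num_tx_queues(vif->dev, n);
            rtnl_unlock();
            return 0;
    }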
+
+void xenvif_carrier_on(struct xenvif *vif)
+{
+ rtnl_lock();
+ if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+ dev_set_mtu(vif->dev, ETH_DATA_LEN);
+ netdev_update_features(vif->dev);
+ netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
+ rtnl_unlock();
+}
+
+int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
struct task_struct *task;
int err = -ENOMEM;
- BUG_ON(vif->tx_irq);
- BUG_ON(vif->task);
- BUG_ON(vif->dealloc_task);
+ BUG_ON(queue->tx_irq);
+ BUG_ON(queue->task);
+ BUG_ON(queue->dealloc_task);
- err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
- init_waitqueue_head(&vif->wq);
- init_waitqueue_head(&vif->dealloc_wq);
+ init_waitqueue_head(&queue->wq);
+ init_waitqueue_head(&queue->dealloc_wq);
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
- vif->domid, tx_evtchn, xenvif_interrupt, 0,
- vif->dev->name, vif);
+ queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
+ queue->name, queue);
if (err < 0)
goto err_unmap;
- vif->tx_irq = vif->rx_irq = err;
- disable_irq(vif->tx_irq);
+ queue->tx_irq = queue->rx_irq = err;
+ disable_irq(queue->tx_irq);
} else {
/* feature-split-event-channels == 1 */
- snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
- "%s-tx", vif->dev->name);
+ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+ "%s-tx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler(
- vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
- vif->tx_irq_name, vif);
+ queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
+ queue->tx_irq_name, queue);
if (err < 0)
goto err_unmap;
- vif->tx_irq = err;
- disable_irq(vif->tx_irq);
+ queue->tx_irq = err;
+ disable_irq(queue->tx_irq);
- snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
- "%s-rx", vif->dev->name);
+ snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+ "%s-rx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler(
- vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
- vif->rx_irq_name, vif);
+ queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
+ queue->rx_irq_name, queue);
if (err < 0)
goto err_tx_unbind;
- vif->rx_irq = err;
- disable_irq(vif->rx_irq);
+ queue->rx_irq = err;
+ disable_irq(queue->rx_irq);
}
task = kthread_create(xenvif_kthread_guest_rx,
- (void *)vif, "%s-guest-rx", vif->dev->name);
+ (void *)queue, "%s-guest-rx", queue->name);
if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
err = PTR_ERR(task);
goto err_rx_unbind;
}
-
- vif->task = task;
+ queue->task = task;
task = kthread_create(xenvif_dealloc_kthread,
- (void *)vif, "%s-dealloc", vif->dev->name);
+ (void *)queue, "%s-dealloc", queue->name);
if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ pr_warn("Could not allocate kthread for %s\n", queue->name);
err = PTR_ERR(task);
goto err_rx_unbind;
}
+ queue->dealloc_task = task;
- vif->dealloc_task = task;
-
- rtnl_lock();
- if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
- dev_set_mtu(vif->dev, ETH_DATA_LEN);
- netdev_update_features(vif->dev);
- netif_carrier_on(vif->dev);
- if (netif_running(vif->dev))
- xenvif_up(vif);
- rtnl_unlock();
-
- wake_up_process(vif->task);
- wake_up_process(vif->dealloc_task);
+ wake_up_process(queue->task);
+ wake_up_process(queue->dealloc_task);
return 0;
err_rx_unbind:
- unbind_from_irqhandler(vif->rx_irq, vif);
- vif->rx_irq = 0;
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ queue->rx_irq = 0;
err_tx_unbind:
- unbind_from_irqhandler(vif->tx_irq, vif);
- vif->tx_irq = 0;
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ queue->tx_irq = 0;
err_unmap:
- xenvif_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(queue);
err:
module_put(THIS_MODULE);
return err;
@@ -529,38 +655,77 @@ void xenvif_carrier_off(struct xenvif *vif)
rtnl_unlock();
}
+static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
+ unsigned int worst_case_skb_lifetime)
+{
+ int i, unmap_timeout = 0;
+
+ for (i = 0; i < MAX_PENDING_REQS; ++i) {
+ if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+ unmap_timeout++;
+ schedule_timeout(msecs_to_jiffies(1000));
+ if (unmap_timeout > worst_case_skb_lifetime &&
+ net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Page still granted! Index: %x\n",
+ i);
+ i = -1;
+ }
+ }
+}
+
void xenvif_disconnect(struct xenvif *vif)
{
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->dev->real_num_tx_queues;
+ unsigned int queue_index;
+
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
- if (vif->task) {
- del_timer_sync(&vif->wake_queue);
- kthread_stop(vif->task);
- vif->task = NULL;
- }
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
- if (vif->dealloc_task) {
- kthread_stop(vif->dealloc_task);
- vif->dealloc_task = NULL;
- }
+ if (queue->task) {
+ del_timer_sync(&queue->wake_queue);
+ kthread_stop(queue->task);
+ queue->task = NULL;
+ }
- if (vif->tx_irq) {
- if (vif->tx_irq == vif->rx_irq)
- unbind_from_irqhandler(vif->tx_irq, vif);
- else {
- unbind_from_irqhandler(vif->tx_irq, vif);
- unbind_from_irqhandler(vif->rx_irq, vif);
+ if (queue->dealloc_task) {
+ kthread_stop(queue->dealloc_task);
+ queue->dealloc_task = NULL;
}
- vif->tx_irq = 0;
+
+ if (queue->tx_irq) {
+ if (queue->tx_irq == queue->rx_irq)
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ else {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ }
+ queue->tx_irq = 0;
+ }
+
+ xenvif_unmap_frontend_rings(queue);
}
+}
- xenvif_unmap_frontend_rings(vif);
+/* Reverse the relevant parts of xenvif_init_queue().
+ * Used for queue teardown from xenvif_free(), and on the
+ * error handling paths in xenbus.c:connect().
+ */
+void xenvif_deinit_queue(struct xenvif_queue *queue)
+{
+ free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+ netif_napi_del(&queue->napi);
}
void xenvif_free(struct xenvif *vif)
{
- int i, unmap_timeout = 0;
+ struct xenvif_queue *queue = NULL;
+ unsigned int num_queues = vif->dev->real_num_tx_queues;
+ unsigned int queue_index;
/* Here we want to avoid timeout messages if an skb can be legitimately
* stuck somewhere else. Realistically this could be another vif's
* internal or QDisc queue. That other vif also has this
@@ -575,33 +740,21 @@ void xenvif_free(struct xenvif *vif)
unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
- for (i = 0; i < MAX_PENDING_REQS; ++i) {
- if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
- unmap_timeout++;
- schedule_timeout(msecs_to_jiffies(1000));
- if (unmap_timeout > worst_case_skb_lifetime &&
- net_ratelimit())
- netdev_err(vif->dev,
- "Page still granted! Index: %x\n",
- i);
- /* If there are still unmapped pages, reset the loop to
- * start checking again. We shouldn't exit here until
- * dealloc thread and NAPI instance release all the
- * pages. If a kernel bug causes the skbs to stall
- * somewhere, the interface cannot be brought down
- * properly.
- */
- i = -1;
- }
- }
-
- free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+ unregister_netdev(vif->dev);
- netif_napi_del(&vif->napi);
+ for (queue_index = 0; queue_index < num_queues; ++queue_index) {
+ queue = &vif->queues[queue_index];
+ xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
+ xenvif_deinit_queue(queue);
+ }
- unregister_netdev(vif->dev);
+ /* Free the array of queues. The call below does not require
+ * rtnl_lock() because it happens after unregister_netdev().
+ */
+ netif_set_real_num_tx_queues(vif->dev, 0);
+ vfree(vif->queues);
+ vif->queues = NULL;
- vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7367208ee8cd..1844a47636b6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -62,6 +62,11 @@ unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;
+unsigned int xenvif_max_queues;
+module_param_named(max_queues, xenvif_max_queues, uint, 0644);
+MODULE_PARM_DESC(max_queues,
+ "Maximum number of queues per virtual interface");
+
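[Editor's note: with this parameter the per-vif queue bound can presumably be tuned at module load time, e.g. modprobe xen-netback max_queues=4, and the 0644 mode makes it readable and root-writable at runtime via /sys/module/xen_netback/parameters/max_queues; how a runtime change interacts with already-connected vifs is not shown in this patch.]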
/*
* This is the maximum number of slots an skb can have. If a guest sends
* an skb which exceeds this limit, it is considered malicious.
@@ -70,33 +75,33 @@ unsigned int rx_drain_timeout_jiffies;
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st);
-static inline int tx_work_todo(struct xenvif *vif);
-static inline int rx_work_todo(struct xenvif *vif);
+static inline int tx_work_todo(struct xenvif_queue *queue);
+static inline int rx_work_todo(struct xenvif_queue *queue);
-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags);
-static inline unsigned long idx_to_pfn(struct xenvif *vif,
+static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
u16 idx)
{
- return page_to_pfn(vif->mmap_pages[idx]);
+ return page_to_pfn(queue->mmap_pages[idx]);
}
-static inline unsigned long idx_to_kaddr(struct xenvif *vif,
+static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
u16 idx)
{
- return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
@@ -104,13 +109,13 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
/* Find the containing queue structure from a pointer in the pending_tx_info array
*/
-static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
+static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
container_of(ubuf, struct pending_tx_info, callback_struct);
return container_of(temp - pending_idx,
- struct xenvif,
+ struct xenvif_queue,
pending_tx_info[0]);
}
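[Editor's note: the temp - pending_idx step leans on callback_struct being embedded in pending_tx_info[pending_idx] of a fixed in-struct array: container_of() recovers the array element, subtracting the index rewinds to element zero, and a second container_of() yields the enclosing queue. A self-contained user-space sketch of the same two-step recovery — all type and field names are illustrative:]

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct info { int desc; };          /* stands in for ubuf_info        */
    struct slot { struct info cb; };    /* .. pending_tx_info             */
    struct owner {                      /* .. xenvif_queue                */
            const char *name;
            struct slot ring[4];        /* .. pending_tx_info[]           */
    };

    static struct owner *info_to_owner(struct info *ub)
    {
            struct slot *s = container_of(ub, struct slot, cb);

            /* rewind to ring[0], then up to the enclosing owner */
            return container_of(s - ub->desc, struct owner, ring[0]);
    }

    int main(void)
    {
            struct owner o = { .name = "queue0" };
            int i;

            for (i = 0; i < 4; i++)
                    o.ring[i].cb.desc = i;
            printf("%s\n", info_to_owner(&o.ring[2].cb)->name); /* queue0 */
            return 0;
    }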
@@ -136,24 +141,24 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
+bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
RING_IDX prod, cons;
do {
- prod = vif->rx.sring->req_prod;
- cons = vif->rx.req_cons;
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
if (prod - cons >= needed)
return true;
- vif->rx.sring->req_event = prod + 1;
+ queue->rx.sring->req_event = prod + 1;
/* Make sure event is visible before we check prod
* again.
*/
mb();
- } while (vif->rx.sring->req_prod != prod);
+ } while (queue->rx.sring->req_prod != prod);
return false;
}
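[Editor's note: this loop is the classic event-index handshake. When there is not enough room, the backend writes req_event = prod + 1 to ask the frontend for a notification, then must re-read req_prod: the frontend may have produced more requests in the window before the event index became visible, and the mb() enforces that ordering. A user-space model of the idiom, with C11 seq_cst atomics standing in for the shared ring page and the kernel's mb() — an assumption for illustration, not the driver's code:]

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ring_shared {
            _Atomic unsigned int req_prod;
            _Atomic unsigned int req_event;
    };

    static bool slots_available(struct ring_shared *shared,
                                unsigned int cons, unsigned int needed)
    {
            unsigned int prod;

            do {
                    prod = atomic_load(&shared->req_prod);
                    if (prod - cons >= needed)
                            return true;
                    /* Ask the producer to notify us once it moves past
                     * prod; seq_cst ordering plays the role of mb(), so
                     * this store is visible before req_prod is re-read.
                     */
                    atomic_store(&shared->req_event, prod + 1);
            } while (atomic_load(&shared->req_prod) != prod);

            return false;
    }

    int main(void)
    {
            struct ring_shared shared = { 0, 0 };

            /* no producer running: reports no space and arms the event */
            return slots_available(&shared, 0, 1) ? 1 : 0;
    }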
@@ -163,7 +168,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
* adding 'size' bytes to a buffer which currently contains 'offset'
* bytes.
*/
-static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+static bool start_new_rx_buffer(int offset, unsigned long size, int head,
+ bool full_coalesce)
{
/* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET)
@@ -175,6 +181,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer.
+ * and (iv) there is no need to fully utilize the buffers
*
* Where:
* - (i) stops us splitting a frag into two copies
@@ -185,6 +192,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* by (ii) but is explicitly checked because
* netfront relies on the first buffer being
* non-empty and can crash otherwise.
+ * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
+ * slots
*
* This means we will effectively linearise small
* frags but do not needlessly split large buffers
@@ -192,7 +201,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* own buffers as before.
*/
BUG_ON(size > MAX_BUFFER_OFFSET);
- if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
+ !full_coalesce)
return true;
return false;
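[Editor's note: condition (iv) is easiest to see with concrete numbers. Below is a stand-alone copy of the predicate for experimenting, assuming MAX_BUFFER_OFFSET equals a 4096-byte PAGE_SIZE; the values in main() are illustrative:]

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_BUFFER_OFFSET 4096

    static bool start_new_rx_buffer(int offset, unsigned long size, int head,
                                    bool full_coalesce)
    {
            if (offset == MAX_BUFFER_OFFSET)
                    return true;
            if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
                !full_coalesce)
                    return true;
            return false;
    }

    int main(void)
    {
            /* 500 bytes arriving at offset 4000 of a non-head buffer:
             * normally a fresh buffer is opened (the frag fits whole in
             * the next one), but under full coalescing the frag is split
             * so the current buffer fills completely.
             */
            printf("normal:   %d\n", start_new_rx_buffer(4000, 500, 0, false));
            printf("coalesce: %d\n", start_new_rx_buffer(4000, 500, 0, true));
            return 0;
    }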
@@ -207,13 +217,13 @@ struct netrx_pending_operations {
grant_ref_t copy_gref;
};
-static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
@@ -227,15 +237,22 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
return meta;
}
+struct xenvif_rx_cb {
+ int meta_slots_used;
+ bool full_coalesce;
+};
+
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
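[Editor's note: XENVIF_RX_CB() is the usual skb->cb idiom — skb->cb is a 48-byte scratch area owned by whoever currently holds the skb, and drivers overlay a small per-packet struct on it. A user-space model with illustrative names; the static_assert mirrors the implicit requirement that the control block fit in cb[]:]

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_skb { char cb[48]; };   /* models sk_buff's cb area */

    struct rx_cb {
            int meta_slots_used;
            bool full_coalesce;
    };

    #define RX_CB(skb) ((struct rx_cb *)(skb)->cb)

    int main(void)
    {
            struct fake_skb skb = { { 0 } };

            static_assert(sizeof(struct rx_cb) <= sizeof(skb.cb),
                          "control block must fit in cb[]");
            RX_CB(&skb)->full_coalesce = true;
            printf("%d\n", RX_CB(&skb)->full_coalesce);
            return 0;
    }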
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
-static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
unsigned long offset, int *head,
- struct xenvif *foreign_vif,
+ struct xenvif_queue *foreign_queue,
grant_ref_t foreign_gref)
{
struct gnttab_copy *copy_gop;
@@ -261,14 +278,17 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
if (bytes > size)
bytes = size;
- if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
+ if (start_new_rx_buffer(npo->copy_off,
+ bytes,
+ *head,
+ XENVIF_RX_CB(skb)->full_coalesce)) {
/*
* Netfront requires there to be some data in the head
* buffer.
*/
BUG_ON(*head);
- meta = get_next_rx_buffer(vif, npo);
+ meta = get_next_rx_buffer(queue, npo);
}
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
@@ -278,8 +298,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes;
- if (foreign_vif) {
- copy_gop->source.domid = foreign_vif->domid;
+ if (foreign_queue) {
+ copy_gop->source.domid = foreign_queue->vif->domid;
copy_gop->source.u.ref = foreign_gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
@@ -289,7 +309,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
copy_gop->source.offset = offset;
- copy_gop->dest.domid = vif->domid;
+ copy_gop->dest.domid = queue->vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
@@ -314,8 +334,8 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
- if (*head && ((1 << gso_type) & vif->gso_mask))
- vif->rx.req_cons++;
+ if (*head && ((1 << gso_type) & queue->vif->gso_mask))
+ queue->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
@@ -337,13 +357,13 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
const int i,
const struct ubuf_info *ubuf)
{
- struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+ struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
do {
u16 pending_idx = ubuf->desc;
if (skb_shinfo(skb)->frags[i].page.p ==
- foreign_vif->mmap_pages[pending_idx])
+ foreign_queue->mmap_pages[pending_idx])
break;
ubuf = (struct ubuf_info *) ubuf->ctx;
} while (ubuf);
@@ -364,7 +384,8 @@ static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
* frontend-side LRO).
*/
static int xenvif_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
+ struct netrx_pending_operations *npo,
+ struct xenvif_queue *queue)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -390,7 +411,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
@@ -398,7 +419,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
meta->id = req->id;
}
- req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
if ((1 << gso_type) & vif->gso_mask) {
@@ -422,7 +443,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
- xenvif_gop_frag_copy(vif, skb, npo,
+ xenvif_gop_frag_copy(queue, skb, npo,
virt_to_page(data), len, offset, &head,
NULL,
0);
@@ -433,7 +454,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* This variable also signals whether foreign_gref has a real
* value or not.
*/
- struct xenvif *foreign_vif = NULL;
+ struct xenvif_queue *foreign_queue = NULL;
grant_ref_t foreign_gref;
if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
@@ -458,8 +479,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if (likely(ubuf)) {
u16 pending_idx = ubuf->desc;
- foreign_vif = ubuf_to_vif(ubuf);
- foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+ foreign_queue = ubuf_to_queue(ubuf);
+ foreign_gref =
+ foreign_queue->pending_tx_info[pending_idx].req.gref;
/* Just a safety measure. If this was the last
* element on the list, the for loop will
* iterate again if a local page were added to
@@ -477,13 +499,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
*/
ubuf = head_ubuf;
}
- xenvif_gop_frag_copy(vif, skb, npo,
+ xenvif_gop_frag_copy(queue, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head,
- foreign_vif,
- foreign_vif ? foreign_gref : UINT_MAX);
+ foreign_queue,
+ foreign_queue ? foreign_gref : UINT_MAX);
}
return npo->meta_prod - old_meta_prod;
@@ -515,7 +537,7 @@ static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
return status;
}
-static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
struct xenvif_rx_meta *meta,
int nr_meta_slots)
{
@@ -536,23 +558,17 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
flags = XEN_NETRXF_more_data;
offset = 0;
- make_rx_response(vif, meta[i].id, status, offset,
+ make_rx_response(queue, meta[i].id, status, offset,
meta[i].size, flags);
}
}
-struct xenvif_rx_cb {
- int meta_slots_used;
-};
-
-#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
-
-void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif_queue *queue)
{
- wake_up(&vif->wq);
+ wake_up(&queue->wq);
}
-static void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif_queue *queue)
{
s8 status;
u16 flags;
@@ -565,13 +581,13 @@ static void xenvif_rx_action(struct xenvif *vif)
bool need_to_notify = false;
struct netrx_pending_operations npo = {
- .copy = vif->grant_copy_op,
- .meta = vif->meta,
+ .copy = queue->grant_copy_op,
+ .meta = queue->meta,
};
skb_queue_head_init(&rxq);
- while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
+ while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) {
RING_IDX max_slots_needed;
RING_IDX old_req_cons;
RING_IDX ring_slots_used;
@@ -602,10 +618,15 @@ static void xenvif_rx_action(struct xenvif *vif)
/* To avoid the estimate becoming too pessimal for some
* frontends that limit posted rx requests, cap the estimate
- * at MAX_SKB_FRAGS.
+ * at MAX_SKB_FRAGS. In this case netback will fully coalesce
+ * the skb into the provided slots.
*/
- if (max_slots_needed > MAX_SKB_FRAGS)
+ if (max_slots_needed > MAX_SKB_FRAGS) {
max_slots_needed = MAX_SKB_FRAGS;
+ XENVIF_RX_CB(skb)->full_coalesce = true;
+ } else {
+ XENVIF_RX_CB(skb)->full_coalesce = false;
+ }
/* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) &&
@@ -614,42 +635,42 @@ static void xenvif_rx_action(struct xenvif *vif)
max_slots_needed++;
/* If the skb may not fit then bail out now */
- if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
- skb_queue_head(&vif->rx_queue, skb);
+ if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
+ skb_queue_head(&queue->rx_queue, skb);
need_to_notify = true;
- vif->rx_last_skb_slots = max_slots_needed;
+ queue->rx_last_skb_slots = max_slots_needed;
break;
} else
- vif->rx_last_skb_slots = 0;
+ queue->rx_last_skb_slots = 0;
- old_req_cons = vif->rx.req_cons;
- XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
- ring_slots_used = vif->rx.req_cons - old_req_cons;
+ old_req_cons = queue->rx.req_cons;
+ XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
+ ring_slots_used = queue->rx.req_cons - old_req_cons;
BUG_ON(ring_slots_used > max_slots_needed);
__skb_queue_tail(&rxq, skb);
}
- BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
if (!npo.copy_prod)
goto done;
BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
- gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
+ gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
- if ((1 << vif->meta[npo.meta_cons].gso_type) &
- vif->gso_prefix_mask) {
- resp = RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ if ((1 << queue->meta[npo.meta_cons].gso_type) &
+ queue->vif->gso_prefix_mask) {
+ resp = RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
- resp->offset = vif->meta[npo.meta_cons].gso_size;
- resp->id = vif->meta[npo.meta_cons].id;
+ resp->offset = queue->meta[npo.meta_cons].gso_size;
+ resp->id = queue->meta[npo.meta_cons].id;
resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
npo.meta_cons++;
@@ -657,10 +678,10 @@ static void xenvif_rx_action(struct xenvif *vif)
}
- vif->dev->stats.tx_bytes += skb->len;
- vif->dev->stats.tx_packets++;
+ queue->stats.tx_bytes += skb->len;
+ queue->stats.tx_packets++;
- status = xenvif_check_gop(vif,
+ status = xenvif_check_gop(queue->vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
@@ -676,22 +697,22 @@ static void xenvif_rx_action(struct xenvif *vif)
flags |= XEN_NETRXF_data_validated;
offset = 0;
- resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
+ resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
status, offset,
- vif->meta[npo.meta_cons].size,
+ queue->meta[npo.meta_cons].size,
flags);
- if ((1 << vif->meta[npo.meta_cons].gso_type) &
- vif->gso_mask) {
+ if ((1 << queue->meta[npo.meta_cons].gso_type) &
+ queue->vif->gso_mask) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
+ gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -699,11 +720,11 @@ static void xenvif_rx_action(struct xenvif *vif)
gso->flags = 0;
}
- xenvif_add_frag_responses(vif, status,
- vif->meta + npo.meta_cons + 1,
+ xenvif_add_frag_responses(queue, status,
+ queue->meta + npo.meta_cons + 1,
XENVIF_RX_CB(skb)->meta_slots_used);
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
need_to_notify |= !!ret;
@@ -713,20 +734,20 @@ static void xenvif_rx_action(struct xenvif *vif)
done:
if (need_to_notify)
- notify_remote_via_irq(vif->rx_irq);
+ notify_remote_via_irq(queue->rx_irq);
}
-void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
int more_to_do;
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+ RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
if (more_to_do)
- napi_schedule(&vif->napi);
+ napi_schedule(&queue->napi);
}
-static void tx_add_credit(struct xenvif *vif)
+static void tx_add_credit(struct xenvif_queue *queue)
{
unsigned long max_burst, max_credit;
@@ -734,55 +755,57 @@ static void tx_add_credit(struct xenvif *vif)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
- max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
+ max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
max_burst = min(max_burst, 131072UL);
- max_burst = max(max_burst, vif->credit_bytes);
+ max_burst = max(max_burst, queue->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
- max_credit = vif->remaining_credit + vif->credit_bytes;
- if (max_credit < vif->remaining_credit)
+ max_credit = queue->remaining_credit + queue->credit_bytes;
+ if (max_credit < queue->remaining_credit)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
- vif->remaining_credit = min(max_credit, max_burst);
+ queue->remaining_credit = min(max_credit, max_burst);
}
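[Editor's note: the replenishment logic above adds one credit_bytes chunk per window, clamps on unsigned wrap, and never allows a burst smaller than what a single jumbo frame (capped at 128 KiB) needs. A runnable restatement of the arithmetic, with min()/max() expanded — the call values in main() are illustrative:]

    #include <limits.h>
    #include <stdio.h>

    static unsigned long add_credit(unsigned long remaining,
                                    unsigned long credit_bytes,
                                    unsigned long next_req_size)
    {
            unsigned long max_burst, max_credit;

            max_burst = next_req_size;
            if (max_burst > 131072UL)           /* 128 KiB jumbo cap  */
                    max_burst = 131072UL;
            if (max_burst < credit_bytes)       /* at least one chunk */
                    max_burst = credit_bytes;

            max_credit = remaining + credit_bytes;
            if (max_credit < remaining)         /* wrapped: clamp     */
                    max_credit = ULONG_MAX;

            return max_credit < max_burst ? max_credit : max_burst;
    }

    int main(void)
    {
            printf("%lu\n", add_credit(0, 65536, 1500));             /* 65536 */
            printf("%lu\n", add_credit(ULONG_MAX - 1, 65536, 1500)); /* 65536 */
            return 0;
    }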
static void tx_credit_callback(unsigned long data)
{
- struct xenvif *vif = (struct xenvif *)data;
- tx_add_credit(vif);
- xenvif_napi_schedule_or_enable_events(vif);
+ struct xenvif_queue *queue = (struct xenvif_queue *)data;
+ tx_add_credit(queue);
+ xenvif_napi_schedule_or_enable_events(queue);
}
-static void xenvif_tx_err(struct xenvif *vif,
+static void xenvif_tx_err(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp, RING_IDX end)
{
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
unsigned long flags;
do {
- spin_lock_irqsave(&vif->response_lock, flags);
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- spin_unlock_irqrestore(&vif->response_lock, flags);
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
- txp = RING_GET_REQUEST(&vif->tx, cons++);
+ txp = RING_GET_REQUEST(&queue->tx, cons++);
} while (1);
- vif->tx.req_cons = cons;
+ queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
- xenvif_kick_thread(vif);
+ /* Disable the vif from queue 0's kthread */
+ if (vif->queues)
+ xenvif_kick_thread(&vif->queues[0]);
}
-static int xenvif_count_requests(struct xenvif *vif,
+static int xenvif_count_requests(struct xenvif_queue *queue,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
int work_to_do)
{
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
int slots = 0;
int drop_err = 0;
int more_data;
@@ -794,10 +817,10 @@ static int xenvif_count_requests(struct xenvif *vif,
struct xen_netif_tx_request dropped_tx = { 0 };
if (slots >= work_to_do) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Asked for %d slots but exceeds this limit\n",
work_to_do);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -ENODATA;
}
@@ -805,10 +828,10 @@ static int xenvif_count_requests(struct xenvif *vif,
* considered malicious.
*/
if (unlikely(slots >= fatal_skb_slots)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
slots, fatal_skb_slots);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -E2BIG;
}
@@ -821,7 +844,7 @@ static int xenvif_count_requests(struct xenvif *vif,
*/
if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Too many slots (%d) exceeding limit (%d), dropping packet\n",
slots, XEN_NETBK_LEGACY_SLOTS_MAX);
drop_err = -E2BIG;
@@ -830,7 +853,7 @@ static int xenvif_count_requests(struct xenvif *vif,
if (drop_err)
txp = &dropped_tx;
- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
sizeof(*txp));
/* If the guest submitted a frame >= 64 KiB then
@@ -844,7 +867,7 @@ static int xenvif_count_requests(struct xenvif *vif,
*/
if (!drop_err && txp->size > first->size) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Invalid tx request, slot size %u > remaining size %u\n",
txp->size, first->size);
drop_err = -EIO;
@@ -854,9 +877,9 @@ static int xenvif_count_requests(struct xenvif *vif,
slots++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}
@@ -868,7 +891,7 @@ static int xenvif_count_requests(struct xenvif *vif,
} while (more_data);
if (drop_err) {
- xenvif_tx_err(vif, first, cons + slots);
+ xenvif_tx_err(queue, first, cons + slots);
return drop_err;
}
@@ -882,17 +905,17 @@ struct xenvif_tx_cb {
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
-static inline void xenvif_tx_create_map_op(struct xenvif *vif,
+static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *mop)
{
- vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
- gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
+ queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
+ gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map | GNTMAP_readonly,
- txp->gref, vif->domid);
+ txp->gref, queue->vif->domid);
- memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+ memcpy(&queue->pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
}
@@ -913,7 +936,7 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *gop)
@@ -940,9 +963,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
- index = pending_index(vif->pending_cons++);
- pending_idx = vif->pending_ring[index];
- xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
}
@@ -950,7 +973,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
struct sk_buff *nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
if (net_ratelimit())
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Can't allocate the frag_list skb.\n");
return NULL;
}
@@ -960,9 +983,9 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
shinfo->nr_frags++, txp++, gop++) {
- index = pending_index(vif->pending_cons++);
- pending_idx = vif->pending_ring[index];
- xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
+ index = pending_index(queue->pending_cons++);
+ pending_idx = queue->pending_ring[index];
+ xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
}
@@ -973,34 +996,34 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
return gop;
}
-static inline void xenvif_grant_handle_set(struct xenvif *vif,
+static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
u16 pending_idx,
grant_handle_t handle)
{
- if (unlikely(vif->grant_tx_handle[pending_idx] !=
+ if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Trying to overwrite active handle! pending_idx: %x\n",
pending_idx);
BUG();
}
- vif->grant_tx_handle[pending_idx] = handle;
+ queue->grant_tx_handle[pending_idx] = handle;
}
-static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
u16 pending_idx)
{
- if (unlikely(vif->grant_tx_handle[pending_idx] ==
+ if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Trying to unmap invalid handle! pending_idx: %x\n",
pending_idx);
BUG();
}
- vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
+ queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
-static int xenvif_tx_check_gop(struct xenvif *vif,
+static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct sk_buff *skb,
struct gnttab_map_grant_ref **gopp_map,
struct gnttab_copy **gopp_copy)
@@ -1017,12 +1040,12 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
(*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
}
check_frags:
@@ -1035,24 +1058,24 @@ check_frags:
newerr = gop_map->status;
if (likely(!newerr)) {
- xenvif_grant_handle_set(vif,
+ xenvif_grant_handle_set(queue,
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
continue;
}
/* Error on this fragment: respond to client with an error. */
if (net_ratelimit())
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
i,
gop_map->status,
pending_idx,
gop_map->ref);
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1060,7 +1083,7 @@ check_frags:
/* First error: invalidate preceding fragments. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -1084,7 +1107,7 @@ check_frags:
shinfo = skb_shinfo(first_skb);
for (j = 0; j < shinfo->nr_frags; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(vif, pending_idx);
+ xenvif_idx_unmap(queue, pending_idx);
}
}
@@ -1092,7 +1115,7 @@ check_frags:
return err;
}
-static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
@@ -1110,23 +1133,23 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
/* If this is not the first frag, chain it to the previous*/
if (prev_pending_idx == INVALID_PENDING_IDX)
skb_shinfo(skb)->destructor_arg =
- &callback_param(vif, pending_idx);
+ &callback_param(queue, pending_idx);
else
- callback_param(vif, prev_pending_idx).ctx =
- &callback_param(vif, pending_idx);
+ callback_param(queue, prev_pending_idx).ctx =
+ &callback_param(queue, pending_idx);
- callback_param(vif, pending_idx).ctx = NULL;
+ callback_param(queue, pending_idx).ctx = NULL;
prev_pending_idx = pending_idx;
- txp = &vif->pending_tx_info[pending_idx].req;
- page = virt_to_page(idx_to_kaddr(vif, pending_idx));
+ txp = &queue->pending_tx_info[pending_idx].req;
+ page = virt_to_page(idx_to_kaddr(queue, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
/* Take an extra reference to offset network stack's put_page */
- get_page(vif->mmap_pages[pending_idx]);
+ get_page(queue->mmap_pages[pending_idx]);
}
/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
* overlaps with "index", and "mapping" is not set. I think mapping
@@ -1136,33 +1159,33 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
skb->pfmemalloc = false;
}
-static int xenvif_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif_queue *queue,
struct xen_netif_extra_info *extras,
int work_to_do)
{
struct xen_netif_extra_info extra;
- RING_IDX cons = vif->tx.req_cons;
+ RING_IDX cons = queue->tx.req_cons;
do {
if (unlikely(work_to_do-- <= 0)) {
- netdev_err(vif->dev, "Missing extra info\n");
- xenvif_fatal_tx_err(vif);
+ netdev_err(queue->vif->dev, "Missing extra info\n");
+ xenvif_fatal_tx_err(queue->vif);
return -EBADR;
}
- memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
+ memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
sizeof(extra));
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
- vif->tx.req_cons = ++cons;
- netdev_err(vif->dev,
+ queue->tx.req_cons = ++cons;
+ netdev_err(queue->vif->dev,
"Invalid extra type: %d\n", extra.type);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
- vif->tx.req_cons = ++cons;
+ queue->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
@@ -1197,7 +1220,7 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
bool recalculate_partial_csum = false;
@@ -1207,7 +1230,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
- vif->rx_gso_checksum_fixup++;
+ queue->stats.rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
@@ -1219,31 +1242,31 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
return skb_checksum_setup(skb, recalculate_partial_csum);
}
-static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
u64 now = get_jiffies_64();
- u64 next_credit = vif->credit_window_start +
- msecs_to_jiffies(vif->credit_usec / 1000);
+ u64 next_credit = queue->credit_window_start +
+ msecs_to_jiffies(queue->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
- if (timer_pending(&vif->credit_timeout))
+ if (timer_pending(&queue->credit_timeout))
return true;
/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
- vif->credit_window_start = now;
- tx_add_credit(vif);
+ queue->credit_window_start = now;
+ tx_add_credit(queue);
}
/* Still too big to send right now? Set a callback. */
- if (size > vif->remaining_credit) {
- vif->credit_timeout.data =
- (unsigned long)vif;
- vif->credit_timeout.function =
+ if (size > queue->remaining_credit) {
+ queue->credit_timeout.data =
+ (unsigned long)queue;
+ queue->credit_timeout.function =
tx_credit_callback;
- mod_timer(&vif->credit_timeout,
+ mod_timer(&queue->credit_timeout,
next_credit);
- vif->credit_window_start = next_credit;
+ queue->credit_window_start = next_credit;
return true;
}
@@ -1251,16 +1274,16 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static void xenvif_tx_build_gops(struct xenvif *vif,
+static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while (skb_queue_len(&vif->tx_queue) < budget) {
+ while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1270,69 +1293,69 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
unsigned int data_len;
pending_ring_idx_t index;
- if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ if (queue->tx.sring->req_prod - queue->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
- vif->tx.sring->req_prod, vif->tx.req_cons,
+ queue->tx.sring->req_prod, queue->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
break;
}
- work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
+ work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
if (!work_to_do)
break;
- idx = vif->tx.req_cons;
+ idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
+ memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
- if (txreq.size > vif->remaining_credit &&
- tx_credit_exceeded(vif, txreq.size))
+ if (txreq.size > queue->remaining_credit &&
+ tx_credit_exceeded(queue, txreq.size))
break;
- vif->remaining_credit -= txreq.size;
+ queue->remaining_credit -= txreq.size;
work_to_do--;
- vif->tx.req_cons = ++idx;
+ queue->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
- work_to_do = xenvif_get_extras(vif, extras,
+ work_to_do = xenvif_get_extras(queue, extras,
work_to_do);
- idx = vif->tx.req_cons;
+ idx = queue->tx.req_cons;
if (unlikely(work_to_do < 0))
break;
}
- ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
break;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Bad packet size: %d\n", txreq.size);
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- xenvif_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(queue->vif);
break;
}
- index = pending_index(vif->pending_cons);
- pending_idx = vif->pending_ring[index];
+ index = pending_index(queue->pending_cons);
+ pending_idx = queue->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1340,9 +1363,9 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
- netdev_dbg(vif->dev,
+ netdev_dbg(queue->vif->dev,
"Can't allocate a skb in start_xmit.\n");
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
@@ -1350,7 +1373,7 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
- if (xenvif_set_skb_gso(vif, skb, gso)) {
+ if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
break;
@@ -1360,18 +1383,18 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
- vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
- vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
- vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
+ queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
+ queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
+ queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
- vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
+ queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
virt_to_mfn(skb->data);
- vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
- vif->tx_copy_ops[*copy_ops].dest.offset =
+ queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
+ queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data);
- vif->tx_copy_ops[*copy_ops].len = data_len;
- vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
+ queue->tx_copy_ops[*copy_ops].len = data_len;
+ queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
(*copy_ops)++;
@@ -1380,42 +1403,42 @@ static void xenvif_tx_build_gops(struct xenvif *vif,
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
- xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
- memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
+ memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
sizeof(txreq));
}
- vif->pending_cons++;
+ queue->pending_cons++;
- request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
+ request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
- xenvif_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, idx);
break;
}
gop = request_gop;
- __skb_queue_tail(&vif->tx_queue, skb);
+ __skb_queue_tail(&queue->tx_queue, skb);
- vif->tx.req_cons = idx;
+ queue->tx.req_cons = idx;
- if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
- (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
+ if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+ (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}
- (*map_ops) = gop - vif->tx_map_ops;
+ (*map_ops) = gop - queue->tx_map_ops;
return;
}
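[Editor's note on the hunk above: the first data_len bytes of each packet — at most PKT_PROT_LEN — are grant-copied straight into the skb's linear area so the stack can parse protocol headers without touching mapped foreign pages, while the remaining slots are grant-mapped as frags. A sketch of that split decision; the two constant values below are illustrative stand-ins, since the driver derives PKT_PROT_LEN from worst-case header sizes:]

    #define PKT_PROT_LEN                    128
    #define XEN_NETBK_LEGACY_SLOTS_MAX      18

    /* How many bytes of the first slot end up in the linear area. */
    static unsigned int linear_len(unsigned int first_slot_size, int nr_slots)
    {
            return (first_slot_size > PKT_PROT_LEN &&
                    nr_slots < XEN_NETBK_LEGACY_SLOTS_MAX) ?
                   PKT_PROT_LEN : first_slot_size;
    }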
/* Consolidate skb with a frag_list into a brand new one with local pages on
* frags. Returns 0 or -ENOMEM if can't allocate new pages.
*/
-static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
@@ -1423,10 +1446,10 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
- vif->tx_zerocopy_sent += 2;
- vif->tx_frag_overflow++;
+ queue->stats.tx_zerocopy_sent += 2;
+ queue->stats.tx_frag_overflow++;
- xenvif_fill_frags(vif, nskb);
+ xenvif_fill_frags(queue, nskb);
/* Subtract frags size, we will correct it later */
skb->truesize -= skb->data_len;
skb->len += nskb->len;
@@ -1478,37 +1501,37 @@ static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
return 0;
}
-static int xenvif_tx_submit(struct xenvif *vif)
+static int xenvif_tx_submit(struct xenvif_queue *queue)
{
- struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
- struct gnttab_copy *gop_copy = vif->tx_copy_ops;
+ struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
+ struct gnttab_copy *gop_copy = queue->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
- while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
- txp = &vif->pending_tx_info[pending_idx].req;
+ txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
+ if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}
data_len = skb->len;
- callback_param(vif, pending_idx).ctx = NULL;
+ callback_param(queue, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xenvif_idx_release(vif, pending_idx,
+ xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
@@ -1517,12 +1540,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- xenvif_fill_frags(vif, skb);
+ xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
- if (xenvif_handle_frag_list(vif, skb)) {
+ if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Not enough memory to consolidate frag_list!\n");
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
kfree_skb(skb);
@@ -1535,12 +1558,12 @@ static int xenvif_tx_submit(struct xenvif *vif)
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
- skb->dev = vif->dev;
+ skb->dev = queue->vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb_reset_network_header(skb);
- if (checksum_setup(vif, skb)) {
- netdev_dbg(vif->dev,
+ if (checksum_setup(queue, skb)) {
+ netdev_dbg(queue->vif->dev,
"Can't setup checksum in net_tx_action\n");
/* We have to set this flag to trigger the callback */
if (skb_shinfo(skb)->destructor_arg)
@@ -1565,8 +1588,8 @@ static int xenvif_tx_submit(struct xenvif *vif)
DIV_ROUND_UP(skb->len - hdrlen, mss);
}
- vif->dev->stats.rx_bytes += skb->len;
- vif->dev->stats.rx_packets++;
+ queue->stats.rx_bytes += skb->len;
+ queue->stats.rx_packets++;
work_done++;
@@ -1577,7 +1600,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
*/
if (skb_shinfo(skb)->destructor_arg) {
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
- vif->tx_zerocopy_sent++;
+ queue->stats.tx_zerocopy_sent++;
}
netif_receive_skb(skb);
@@ -1590,47 +1613,47 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
- struct xenvif *vif = ubuf_to_vif(ubuf);
+ struct xenvif_queue *queue = ubuf_to_queue(ubuf);
/* This is the only place where we grab this lock, to protect callbacks
* from each other.
*/
- spin_lock_irqsave(&vif->callback_lock, flags);
+ spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
ubuf = (struct ubuf_info *) ubuf->ctx;
- BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+ BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
- index = pending_index(vif->dealloc_prod);
- vif->dealloc_ring[index] = pending_idx;
+ index = pending_index(queue->dealloc_prod);
+ queue->dealloc_ring[index] = pending_idx;
/* Sync with xenvif_tx_dealloc_action:
* insert idx then incr producer.
*/
smp_wmb();
- vif->dealloc_prod++;
+ queue->dealloc_prod++;
} while (ubuf);
- wake_up(&vif->dealloc_wq);
- spin_unlock_irqrestore(&vif->callback_lock, flags);
+ wake_up(&queue->dealloc_wq);
+ spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
- vif->tx_zerocopy_success++;
+ queue->stats.tx_zerocopy_success++;
else
- vif->tx_zerocopy_fail++;
+ queue->stats.tx_zerocopy_fail++;
}
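[Editor's note: the smp_wmb() pairing in this callback matters — the ring slot must be written before dealloc_prod advances, because the dealloc thread reads the producer first and the slots afterwards (under the matching smp_rmb() in the next hunk). A compact C11-atomics model of the producer side, with release ordering standing in for the kernel barrier; names and the ring size are illustrative:]

    #include <stdatomic.h>

    struct dealloc_ring {
            unsigned short slots[256];
            _Atomic unsigned int prod;  /* only the callback writes this */
    };

    static void push_idx(struct dealloc_ring *r, unsigned short pending_idx)
    {
            unsigned int p = atomic_load_explicit(&r->prod,
                                                  memory_order_relaxed);

            /* fill the slot first ... */
            r->slots[p & 255] = pending_idx;
            /* ... then publish it: release ordering guarantees the
             * consumer that sees the new prod also sees the slot.
             */
            atomic_store_explicit(&r->prod, p + 1, memory_order_release);
    }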
-static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
pending_ring_idx_t dc, dp;
u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
unsigned int i = 0;
- dc = vif->dealloc_cons;
- gop = vif->tx_unmap_ops;
+ dc = queue->dealloc_cons;
+ gop = queue->tx_unmap_ops;
/* Free up any grants we have finished using */
do {
- dp = vif->dealloc_prod;
+ dp = queue->dealloc_prod;
/* Ensure we see all indices enqueued by all
* xenvif_zerocopy_callback().
@@ -1638,38 +1661,38 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
smp_rmb();
while (dc != dp) {
- BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+ BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
pending_idx =
- vif->dealloc_ring[pending_index(dc++)];
+ queue->dealloc_ring[pending_index(dc++)];
- pending_idx_release[gop-vif->tx_unmap_ops] =
+ pending_idx_release[gop-queue->tx_unmap_ops] =
pending_idx;
- vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
- vif->mmap_pages[pending_idx];
+ queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+ queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
- idx_to_kaddr(vif, pending_idx),
+ idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
- vif->grant_tx_handle[pending_idx]);
- xenvif_grant_handle_reset(vif, pending_idx);
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);
++gop;
}
- } while (dp != vif->dealloc_prod);
+ } while (dp != queue->dealloc_prod);
- vif->dealloc_cons = dc;
+ queue->dealloc_cons = dc;
- if (gop - vif->tx_unmap_ops > 0) {
+ if (gop - queue->tx_unmap_ops > 0) {
int ret;
- ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+ ret = gnttab_unmap_refs(queue->tx_unmap_ops,
NULL,
- vif->pages_to_unmap,
- gop - vif->tx_unmap_ops);
+ queue->pages_to_unmap,
+ gop - queue->tx_unmap_ops);
if (ret) {
- netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
- gop - vif->tx_unmap_ops, ret);
- for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+ gop - queue->tx_unmap_ops, ret);
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
" host_addr: %llx handle: %x status: %d\n",
gop[i].host_addr,
gop[i].handle,
@@ -1679,91 +1702,91 @@ static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
}
}
- for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
- xenvif_idx_release(vif, pending_idx_release[i],
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
+ xenvif_idx_release(queue, pending_idx_release[i],
XEN_NETIF_RSP_OKAY);
}
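The dealloc ring handled above is a single-producer/single-consumer index ring: xenvif_zerocopy_callback() stores an index and only then advances dealloc_prod behind smp_wmb(), while xenvif_tx_dealloc_action() pairs that with smp_rmb() after sampling dealloc_prod (callback_lock serializes the producers, so the SPSC model holds). Below is a minimal userspace sketch of the same ordering, using C11 release/acquire in place of the kernel barriers; every name in it is illustrative rather than the driver's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 256	/* power of two, standing in for MAX_PENDING_REQS */

static unsigned short ring[RING_SIZE];
static atomic_uint prod;	/* advanced only by the producer */
static atomic_uint cons;	/* advanced only by the consumer */

/* Producer: fill the slot first, then publish it by advancing prod.
 * The release store plays the role of smp_wmb() before dealloc_prod++.
 */
static void push(unsigned short idx)
{
	unsigned int p = atomic_load_explicit(&prod, memory_order_relaxed);

	while (p - atomic_load_explicit(&cons, memory_order_acquire) >=
	       RING_SIZE)
		;	/* ring full: wait for the consumer */
	ring[p & (RING_SIZE - 1)] = idx;
	atomic_store_explicit(&prod, p + 1, memory_order_release);
}

/* Consumer: sample prod with acquire (the smp_rmb() side); every slot
 * up to that index is then safe to read.
 */
static void *drain(void *arg)
{
	unsigned int c = 0, seen = 0;

	(void)arg;
	while (seen < 1000) {
		while (c == atomic_load_explicit(&prod, memory_order_acquire))
			;	/* ring empty: spin (the driver sleeps) */
		seen += ring[c & (RING_SIZE - 1)] == (unsigned short)c;
		atomic_store_explicit(&cons, ++c, memory_order_release);
	}
	printf("consumed %u slots in order\n", seen);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, drain, NULL);
	for (unsigned short i = 0; i < 1000; i++)
		push(i);
	pthread_join(t, NULL);
	return 0;
}

The release store guarantees the slot contents are visible before the new producer index, and the acquire load guarantees a consumer that sees the index also sees the slot: exactly the invariant the barrier comments and the BUG_ON() above rely on.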
/* Called after netfront has transmitted */
-int xenvif_tx_action(struct xenvif *vif, int budget)
+int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
unsigned nr_mops, nr_cops = 0;
int work_done, ret;
- if (unlikely(!tx_work_todo(vif)))
+ if (unlikely(!tx_work_todo(queue)))
return 0;
- xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);
+ xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
if (nr_cops == 0)
return 0;
- gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
+ gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
if (nr_mops != 0) {
- ret = gnttab_map_refs(vif->tx_map_ops,
+ ret = gnttab_map_refs(queue->tx_map_ops,
NULL,
- vif->pages_to_map,
+ queue->pages_to_map,
nr_mops);
BUG_ON(ret);
}
- work_done = xenvif_tx_submit(vif);
+ work_done = xenvif_tx_submit(queue);
return work_done;
}
-static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
unsigned long flags;
- pending_tx_info = &vif->pending_tx_info[pending_idx];
- spin_lock_irqsave(&vif->response_lock, flags);
- make_tx_response(vif, &pending_tx_info->req, status);
- index = pending_index(vif->pending_prod);
- vif->pending_ring[index] = pending_idx;
+ pending_tx_info = &queue->pending_tx_info[pending_idx];
+ spin_lock_irqsave(&queue->response_lock, flags);
+ make_tx_response(queue, &pending_tx_info->req, status);
+ index = pending_index(queue->pending_prod);
+ queue->pending_ring[index] = pending_idx;
/* TX shouldn't use the index before we give it back here */
mb();
- vif->pending_prod++;
- spin_unlock_irqrestore(&vif->response_lock, flags);
+ queue->pending_prod++;
+ spin_unlock_irqrestore(&queue->response_lock, flags);
}
-static void make_tx_response(struct xenvif *vif,
+static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
s8 st)
{
- RING_IDX i = vif->tx.rsp_prod_pvt;
+ RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
int notify;
- resp = RING_GET_RESPONSE(&vif->tx, i);
+ resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
resp->status = st;
if (txp->flags & XEN_NETTXF_extra_info)
- RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
- vif->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
+ queue->tx.rsp_prod_pvt = ++i;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
- notify_remote_via_irq(vif->tx_irq);
+ notify_remote_via_irq(queue->tx_irq);
}
-static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags)
{
- RING_IDX i = vif->rx.rsp_prod_pvt;
+ RING_IDX i = queue->rx.rsp_prod_pvt;
struct xen_netif_rx_response *resp;
- resp = RING_GET_RESPONSE(&vif->rx, i);
+ resp = RING_GET_RESPONSE(&queue->rx, i);
resp->offset = offset;
resp->flags = flags;
resp->id = id;
@@ -1771,26 +1794,26 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
if (st < 0)
resp->status = (s16)st;
- vif->rx.rsp_prod_pvt = ++i;
+ queue->rx.rsp_prod_pvt = ++i;
return resp;
}
-void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;
gnttab_set_unmap_op(&tx_unmap_op,
- idx_to_kaddr(vif, pending_idx),
+ idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
- vif->grant_tx_handle[pending_idx]);
- xenvif_grant_handle_reset(vif, pending_idx);
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);
ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
- &vif->mmap_pages[pending_idx], 1);
+ &queue->mmap_pages[pending_idx], 1);
if (ret) {
- netdev_err(vif->dev,
+ netdev_err(queue->vif->dev,
"Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
ret,
pending_idx,
@@ -1800,41 +1823,40 @@ void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
BUG();
}
- xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}
-static inline int rx_work_todo(struct xenvif *vif)
+static inline int rx_work_todo(struct xenvif_queue *queue)
{
- return (!skb_queue_empty(&vif->rx_queue) &&
- xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
- vif->rx_queue_purge;
+ return (!skb_queue_empty(&queue->rx_queue) &&
+ xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
+ queue->rx_queue_purge;
}
-static inline int tx_work_todo(struct xenvif *vif)
+static inline int tx_work_todo(struct xenvif_queue *queue)
{
-
- if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
return 1;
return 0;
}
-static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
- return vif->dealloc_cons != vif->dealloc_prod;
+ return queue->dealloc_cons != queue->dealloc_prod;
}
-void xenvif_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
- if (vif->tx.sring)
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->tx.sring);
- if (vif->rx.sring)
- xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
- vif->rx.sring);
+ if (queue->tx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+ queue->tx.sring);
+ if (queue->rx.sring)
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
+ queue->rx.sring);
}
-int xenvif_map_frontend_rings(struct xenvif *vif,
+int xenvif_map_frontend_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
@@ -1844,85 +1866,78 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
int err = -ENOMEM;
- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
tx_ring_ref, &addr);
if (err)
goto err;
txs = (struct xen_netif_tx_sring *)addr;
- BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
- err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
rx_ring_ref, &addr);
if (err)
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
- BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+ BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
return 0;
err:
- xenvif_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(queue);
return err;
}
-void xenvif_stop_queue(struct xenvif *vif)
+static void xenvif_start_queue(struct xenvif_queue *queue)
{
- if (!vif->can_queue)
- return;
-
- netif_stop_queue(vif->dev);
-}
-
-static void xenvif_start_queue(struct xenvif *vif)
-{
- if (xenvif_schedulable(vif))
- netif_wake_queue(vif->dev);
+ if (xenvif_schedulable(queue->vif))
+ xenvif_wake_queue(queue);
}
int xenvif_kthread_guest_rx(void *data)
{
- struct xenvif *vif = data;
+ struct xenvif_queue *queue = data;
struct sk_buff *skb;
while (!kthread_should_stop()) {
- wait_event_interruptible(vif->wq,
- rx_work_todo(vif) ||
- vif->disabled ||
+ wait_event_interruptible(queue->wq,
+ rx_work_todo(queue) ||
+ queue->vif->disabled ||
kthread_should_stop());
/* This frontend is found to be rogue, disable it in
* kthread context. Currently this is only set when
* netback finds out frontend sends malformed packet,
* but we cannot disable the interface in softirq
- * context so we defer it here.
+ * context so we defer it here, if this thread is
+ * associated with queue 0.
*/
- if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
- xenvif_carrier_off(vif);
+ if (unlikely(queue->vif->disabled &&
+ netif_carrier_ok(queue->vif->dev) &&
+ queue->id == 0))
+ xenvif_carrier_off(queue->vif);
if (kthread_should_stop())
break;
- if (vif->rx_queue_purge) {
- skb_queue_purge(&vif->rx_queue);
- vif->rx_queue_purge = false;
+ if (queue->rx_queue_purge) {
+ skb_queue_purge(&queue->rx_queue);
+ queue->rx_queue_purge = false;
}
- if (!skb_queue_empty(&vif->rx_queue))
- xenvif_rx_action(vif);
+ if (!skb_queue_empty(&queue->rx_queue))
+ xenvif_rx_action(queue);
- if (skb_queue_empty(&vif->rx_queue) &&
- netif_queue_stopped(vif->dev)) {
- del_timer_sync(&vif->wake_queue);
- xenvif_start_queue(vif);
+ if (skb_queue_empty(&queue->rx_queue) &&
+ xenvif_queue_stopped(queue)) {
+ del_timer_sync(&queue->wake_queue);
+ xenvif_start_queue(queue);
}
cond_resched();
}
/* Bin any remaining skbs */
- while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+ while ((skb = skb_dequeue(&queue->rx_queue)) != NULL)
dev_kfree_skb(skb);
return 0;
@@ -1930,22 +1945,22 @@ int xenvif_kthread_guest_rx(void *data)
int xenvif_dealloc_kthread(void *data)
{
- struct xenvif *vif = data;
+ struct xenvif_queue *queue = data;
while (!kthread_should_stop()) {
- wait_event_interruptible(vif->dealloc_wq,
- tx_dealloc_work_todo(vif) ||
+ wait_event_interruptible(queue->dealloc_wq,
+ tx_dealloc_work_todo(queue) ||
kthread_should_stop());
if (kthread_should_stop())
break;
- xenvif_tx_dealloc_action(vif);
+ xenvif_tx_dealloc_action(queue);
cond_resched();
}
/* Unmap anything remaining */
- if (tx_dealloc_work_todo(vif))
- xenvif_tx_dealloc_action(vif);
+ if (tx_dealloc_work_todo(queue))
+ xenvif_tx_dealloc_action(queue);
return 0;
}
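Both kthreads above share one shape: sleep until there is work or a stop request, service work in a loop, and on stop drain anything still queued before returning (the "Unmap anything remaining" step). A rough userspace analogue, with a condition variable standing in for the wait queue and a flag standing in for kthread_should_stop(); this sketches the pattern only, not the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int pending;		/* stands in for dealloc_prod != dealloc_cons */
static bool should_stop;	/* stands in for kthread_should_stop() */

static void *dealloc_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!should_stop) {
		/* wait_event_interruptible(): sleep until work or stop */
		while (!pending && !should_stop)
			pthread_cond_wait(&wq, &lock);
		if (should_stop)
			break;
		pending--;	/* the real work would happen here */
	}
	/* Drain whatever is left, as the kthread above does on exit */
	while (pending > 0) {
		pending--;
		printf("drained one leftover item\n");
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dealloc_worker, NULL);
	pthread_mutex_lock(&lock);
	pending = 3;	/* queue some work, then immediately request stop */
	should_stop = true;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}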
@@ -1957,6 +1972,9 @@ static int __init netback_init(void)
if (!xen_domain())
return -ENODEV;
+ /* Allow as many queues as there are CPUs, by default */
+ xenvif_max_queues = num_online_cpus();
+
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 7a206cffb062..96c63dc2509e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -19,6 +19,8 @@
*/
#include "common.h"
+#include <linux/vmalloc.h>
+#include <linux/rtnetlink.h>
struct backend_info {
struct xenbus_device *dev;
@@ -34,8 +36,9 @@ struct backend_info {
u8 have_hotplug_status_watch:1;
};
-static int connect_rings(struct backend_info *);
-static void connect(struct backend_info *);
+static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static void connect(struct backend_info *be);
+static int read_xenbus_vif_flags(struct backend_info *be);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be,
@@ -157,6 +160,12 @@ static int netback_probe(struct xenbus_device *dev,
if (err)
pr_debug("Error writing feature-split-event-channels\n");
+ /* Multi-queue support: This is an optional feature. */
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "multi-queue-max-queues", "%u", xenvif_max_queues);
+ if (err)
+ pr_debug("Error writing multi-queue-max-queues\n");
+
err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err)
goto fail;
@@ -485,10 +494,26 @@ static void connect(struct backend_info *be)
{
int err;
struct xenbus_device *dev = be->dev;
+ unsigned long credit_bytes, credit_usec;
+ unsigned int queue_index;
+ unsigned int requested_num_queues;
+ struct xenvif_queue *queue;
- err = connect_rings(be);
- if (err)
+ /* Check whether the frontend requested multiple queues
+ * and read the number requested.
+ */
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "multi-queue-num-queues",
+ "%u", &requested_num_queues);
+ if (err < 0) {
+ requested_num_queues = 1; /* Fall back to single queue */
+ } else if (requested_num_queues > xenvif_max_queues) {
+ /* buggy or malicious guest */
+ xenbus_dev_fatal(dev, err,
+ "guest requested %u queues, exceeding the maximum of %u.",
+ requested_num_queues, xenvif_max_queues);
return;
+ }
err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
if (err) {
@@ -496,9 +521,54 @@ static void connect(struct backend_info *be)
return;
}
- xen_net_read_rate(dev, &be->vif->credit_bytes,
- &be->vif->credit_usec);
- be->vif->remaining_credit = be->vif->credit_bytes;
+ xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+ read_xenbus_vif_flags(be);
+
+ /* Use the number of queues requested by the frontend */
+ be->vif->queues = vzalloc(requested_num_queues *
+ sizeof(struct xenvif_queue));
+ if (!be->vif->queues) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "allocating queues");
+ return;
+ }
+ rtnl_lock();
+ netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+ rtnl_unlock();
+
+ for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
+ queue = &be->vif->queues[queue_index];
+ queue->vif = be->vif;
+ queue->id = queue_index;
+ snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+ be->vif->dev->name, queue->id);
+
+ err = xenvif_init_queue(queue);
+ if (err) {
+ /* xenvif_init_queue() cleans up after itself on
+ * failure, but we need to clean up any previously
+ * initialised queues. Set num_queues to queue_index so that
+ * earlier queues can be destroyed using the regular
+ * disconnect logic.
+ */
+ rtnl_lock();
+ netif_set_real_num_tx_queues(be->vif->dev, queue_index);
+ rtnl_unlock();
+ goto err;
+ }
+
+ queue->remaining_credit = credit_bytes;
+
+ err = connect_rings(be, queue);
+ if (err) {
+ /* connect_rings() cleans up after itself on failure,
+ * but we need to clean up after xenvif_init_queue() here,
+ * and also clean up any previously initialised queues.
+ */
+ xenvif_deinit_queue(queue);
+ rtnl_lock();
+ netif_set_real_num_tx_queues(be->vif->dev, queue_index);
+ rtnl_unlock();
+ goto err;
+ }
+ }
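The error handling inside this loop is the usual partial-initialisation idiom: when queue queue_index fails to come up, exactly the first queue_index queues are live, so that count is recorded via netif_set_real_num_tx_queues() and only those are unwound by the disconnect path. A standalone sketch of the idiom, with hypothetical init/deinit helpers:

#include <stdio.h>

#define NUM_ITEMS 4

/* Hypothetical helpers; init fails on item 2 to exercise the unwind */
static int init_item(int i)
{
	if (i == 2)
		return -1;
	printf("initialised item %d\n", i);
	return 0;
}

static void deinit_item(int i)
{
	printf("tore down item %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_ITEMS; i++) {
		if (init_item(i) < 0) {
			/* Items 0..i-1 are live; the patch records this
			 * count with netif_set_real_num_tx_queues(dev, i)
			 * and tears down exactly those.
			 */
			while (i--)
				deinit_item(i);
			return 1;
		}
	}
	return 0;
}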
+
+ xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
@@ -507,45 +577,109 @@ static void connect(struct backend_info *be)
if (!err)
be->have_hotplug_status_watch = 1;
- netif_wake_queue(be->vif->dev);
+ netif_tx_wake_all_queues(be->vif->dev);
+
+ return;
+
+err:
+ if (be->vif->dev->real_num_tx_queues > 0)
+ xenvif_disconnect(be->vif); /* Clean up existing queues */
+ vfree(be->vif->queues);
+ be->vif->queues = NULL;
+ rtnl_lock();
+ netif_set_real_num_tx_queues(be->vif->dev, 0);
+ rtnl_unlock();
+ return;
}
-static int connect_rings(struct backend_info *be)
+static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
{
- struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev;
+ unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
unsigned long tx_ring_ref, rx_ring_ref;
- unsigned int tx_evtchn, rx_evtchn, rx_copy;
+ unsigned int tx_evtchn, rx_evtchn;
int err;
- int val;
+ char *xspath;
+ size_t xspathsize;
+ const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
+
+ /* If the frontend requested 1 queue, or we have fallen back
+ * to single queue due to lack of frontend support for multi-
+ * queue, expect the remaining XenStore keys in the toplevel
+ * directory. Otherwise, expect them in a subdirectory called
+ * queue-N.
+ */
+ if (num_queues == 1) {
+ xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
+ if (!xspath) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "reading ring references");
+ return -ENOMEM;
+ }
+ strcpy(xspath, dev->otherend);
+ } else {
+ xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
+ xspath = kzalloc(xspathsize, GFP_KERNEL);
+ if (!xspath) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "reading ring references");
+ return -ENOMEM;
+ }
+ snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
+ queue->id);
+ }
- err = xenbus_gather(XBT_NIL, dev->otherend,
+ err = xenbus_gather(XBT_NIL, xspath,
"tx-ring-ref", "%lu", &tx_ring_ref,
"rx-ring-ref", "%lu", &rx_ring_ref, NULL);
if (err) {
xenbus_dev_fatal(dev, err,
"reading %s/ring-ref",
- dev->otherend);
- return err;
+ xspath);
+ goto err;
}
/* Try split event channels first, then single event channel. */
- err = xenbus_gather(XBT_NIL, dev->otherend,
+ err = xenbus_gather(XBT_NIL, xspath,
"event-channel-tx", "%u", &tx_evtchn,
"event-channel-rx", "%u", &rx_evtchn, NULL);
if (err < 0) {
- err = xenbus_scanf(XBT_NIL, dev->otherend,
+ err = xenbus_scanf(XBT_NIL, xspath,
"event-channel", "%u", &tx_evtchn);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"reading %s/event-channel(-tx/rx)",
- dev->otherend);
- return err;
+ xspath);
+ goto err;
}
rx_evtchn = tx_evtchn;
}
+ /* Map the shared frame, irq etc. */
+ err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
+ tx_evtchn, rx_evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "mapping shared-frames %lu/%lu port tx %u rx %u",
+ tx_ring_ref, rx_ring_ref,
+ tx_evtchn, rx_evtchn);
+ goto err;
+ }
+
+ err = 0;
+err: /* Regular return falls through with err == 0 */
+ kfree(xspath);
+ return err;
+}
+
+static int read_xenbus_vif_flags(struct backend_info *be)
+{
+ struct xenvif *vif = be->vif;
+ struct xenbus_device *dev = be->dev;
+ unsigned int rx_copy;
+ int err, val;
+
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy);
if (err == -ENOENT) {
@@ -621,16 +755,6 @@ static int connect_rings(struct backend_info *be)
val = 0;
vif->ipv6_csum = !!val;
- /* Map the shared frame, irq etc. */
- err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
- tx_evtchn, rx_evtchn);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "mapping shared-frames %lu/%lu port tx %u rx %u",
- tx_ring_ref, rx_ring_ref,
- tx_evtchn, rx_evtchn);
- return err;
- }
return 0;
}
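connect_rings() now derives one XenStore directory per queue: the frontend's toplevel directory when there is a single queue, otherwise "<otherend>/queue-N", where xenstore_path_ext_size = 11 covers "/queue-" plus up to three digits and the terminating NUL. A small sketch of the same path construction; the otherend value is a made-up example, not a real XenStore node:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *otherend = "/local/domain/7/device/vif/0";
	const size_t ext = 11;	/* strlen("/queue-") + 3 digits + NUL */
	unsigned int queue_id;

	for (queue_id = 0; queue_id < 4; queue_id++) {
		size_t len = strlen(otherend) + ext;
		char *xspath = calloc(1, len);

		if (!xspath)
			return 1;
		snprintf(xspath, len, "%s/queue-%u", otherend, queue_id);
		printf("queue %u keys live under %s\n", queue_id, xspath);
		free(xspath);
	}
	return 0;
}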
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 158b5e639fc7..5a7872ac3566 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,6 +57,12 @@
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
+/* Module parameters */
+static unsigned int xennet_max_queues;
+module_param_named(max_queues, xennet_max_queues, uint, 0644);
+MODULE_PARM_DESC(max_queues,
+ "Maximum number of queues per virtual interface");
+
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
@@ -73,6 +79,12 @@ struct netfront_cb {
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
+/* Queue name is interface name with "-qNNN" appended */
+#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
+
+/* IRQ name is queue name with "-tx" or "-rx" appended */
+#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+
struct netfront_stats {
u64 rx_packets;
u64 tx_packets;
@@ -81,9 +93,12 @@ struct netfront_stats {
struct u64_stats_sync syncp;
};
-struct netfront_info {
- struct list_head list;
- struct net_device *netdev;
+struct netfront_info;
+
+struct netfront_queue {
+ unsigned int id; /* Queue ID, 0-based */
+ char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
+ struct netfront_info *info;
struct napi_struct napi;
@@ -93,10 +108,8 @@ struct netfront_info {
unsigned int tx_evtchn, rx_evtchn;
unsigned int tx_irq, rx_irq;
/* Only used when split event channels support is enabled */
- char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
- char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
-
- struct xenbus_device *xbdev;
+ char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
+ char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
spinlock_t tx_lock;
struct xen_netif_tx_front_ring tx;
@@ -140,11 +153,21 @@ struct netfront_info {
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_info {
+ struct list_head list;
+ struct net_device *netdev;
+
+ struct xenbus_device *xbdev;
+
+ /* Multi-queue support */
+ struct netfront_queue *queues;
/* Statistics */
struct netfront_stats __percpu *stats;
- unsigned long rx_gso_checksum_fixup;
+ atomic_t rx_gso_checksum_fixup;
};
struct netfront_rx_info {
@@ -187,21 +210,21 @@ static int xennet_rxidx(RING_IDX idx)
return idx & (NET_RX_RING_SIZE - 1);
}
-static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
RING_IDX ri)
{
int i = xennet_rxidx(ri);
- struct sk_buff *skb = np->rx_skbs[i];
- np->rx_skbs[i] = NULL;
+ struct sk_buff *skb = queue->rx_skbs[i];
+ queue->rx_skbs[i] = NULL;
return skb;
}
-static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
RING_IDX ri)
{
int i = xennet_rxidx(ri);
- grant_ref_t ref = np->grant_rx_ref[i];
- np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ grant_ref_t ref = queue->grant_rx_ref[i];
+ queue->grant_rx_ref[i] = GRANT_INVALID_REF;
return ref;
}
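xennet_rxidx() maps a free-running RING_IDX onto a slot with idx & (NET_RX_RING_SIZE - 1), which behaves as modulo arithmetic only because the ring size is a power of two. A tiny sketch showing the mask wraps cleanly:

#include <stdio.h>

#define RING_SIZE 256	/* must be a power of two for the mask to work */

static unsigned int ring_idx(unsigned int idx)
{
	return idx & (RING_SIZE - 1);
}

int main(void)
{
	/* A monotonically increasing index revisits the same slots */
	unsigned int samples[] = { 0, 1, 255, 256, 257, 1000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("idx %u -> slot %u\n", samples[i],
		       ring_idx(samples[i]));
	return 0;
}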
@@ -221,41 +244,40 @@ static bool xennet_can_sg(struct net_device *dev)
static void rx_refill_timeout(unsigned long data)
{
- struct net_device *dev = (struct net_device *)data;
- struct netfront_info *np = netdev_priv(dev);
- napi_schedule(&np->napi);
+ struct netfront_queue *queue = (struct netfront_queue *)data;
+ napi_schedule(&queue->napi);
}
-static int netfront_tx_slot_available(struct netfront_info *np)
+static int netfront_tx_slot_available(struct netfront_queue *queue)
{
- return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
-static void xennet_maybe_wake_tx(struct net_device *dev)
+static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
- struct netfront_info *np = netdev_priv(dev);
+ struct net_device *dev = queue->info->netdev;
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
- if (unlikely(netif_queue_stopped(dev)) &&
- netfront_tx_slot_available(np) &&
+ if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
+ netfront_tx_slot_available(queue) &&
likely(netif_running(dev)))
- netif_wake_queue(dev);
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
-static void xennet_alloc_rx_buffers(struct net_device *dev)
+static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
unsigned short id;
- struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
struct page *page;
int i, batch_target, notify;
- RING_IDX req_prod = np->rx.req_prod_pvt;
+ RING_IDX req_prod = queue->rx.req_prod_pvt;
grant_ref_t ref;
unsigned long pfn;
void *vaddr;
struct xen_netif_rx_request *req;
- if (unlikely(!netif_carrier_ok(dev)))
+ if (unlikely(!netif_carrier_ok(queue->info->netdev)))
return;
/*
@@ -264,9 +286,10 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
* allocator, so should reduce the chance of failed allocation requests
* both for ourselves and for other kernel subsystems.
*/
- batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
- for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
+ batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
+ for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
+ skb = __netdev_alloc_skb(queue->info->netdev,
+ RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
@@ -279,7 +302,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
kfree_skb(skb);
no_skb:
/* Could not allocate any skbuffs. Try again later. */
- mod_timer(&np->rx_refill_timer,
+ mod_timer(&queue->rx_refill_timer,
jiffies + (HZ/10));
/* Any skbuffs queued for refill? Force them out. */
@@ -289,44 +312,44 @@ no_skb:
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
- __skb_queue_tail(&np->rx_batch, skb);
+ __skb_queue_tail(&queue->rx_batch, skb);
}
/* Is the batch large enough to be worthwhile? */
- if (i < (np->rx_target/2)) {
- if (req_prod > np->rx.sring->req_prod)
+ if (i < (queue->rx_target/2)) {
+ if (req_prod > queue->rx.sring->req_prod)
goto push;
return;
}
/* Adjust our fill target if we risked running out of buffers. */
- if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
- ((np->rx_target *= 2) > np->rx_max_target))
- np->rx_target = np->rx_max_target;
+ if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
+ ((queue->rx_target *= 2) > queue->rx_max_target))
+ queue->rx_target = queue->rx_max_target;
refill:
for (i = 0; ; i++) {
- skb = __skb_dequeue(&np->rx_batch);
+ skb = __skb_dequeue(&queue->rx_batch);
if (skb == NULL)
break;
- skb->dev = dev;
+ skb->dev = queue->info->netdev;
id = xennet_rxidx(req_prod + i);
- BUG_ON(np->rx_skbs[id]);
- np->rx_skbs[id] = skb;
+ BUG_ON(queue->rx_skbs[id]);
+ queue->rx_skbs[id] = skb;
- ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
BUG_ON((signed short)ref < 0);
- np->grant_rx_ref[id] = ref;
+ queue->grant_rx_ref[id] = ref;
pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
- req = RING_GET_REQUEST(&np->rx, req_prod + i);
+ req = RING_GET_REQUEST(&queue->rx, req_prod + i);
gnttab_grant_foreign_access_ref(ref,
- np->xbdev->otherend_id,
+ queue->info->xbdev->otherend_id,
pfn_to_mfn(pfn),
0);
@@ -337,72 +360,77 @@ no_skb:
wmb(); /* barrier so backend sees requests */
/* Above is a suitable barrier to ensure backend will see requests. */
- np->rx.req_prod_pvt = req_prod + i;
+ queue->rx.req_prod_pvt = req_prod + i;
push:
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
- notify_remote_via_irq(np->rx_irq);
+ notify_remote_via_irq(queue->rx_irq);
}
static int xennet_open(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
-
- napi_enable(&np->napi);
-
- spin_lock_bh(&np->rx_lock);
- if (netif_carrier_ok(dev)) {
- xennet_alloc_rx_buffers(dev);
- np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
- if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
- napi_schedule(&np->napi);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i = 0;
+ struct netfront_queue *queue = NULL;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ napi_enable(&queue->napi);
+
+ spin_lock_bh(&queue->rx_lock);
+ if (netif_carrier_ok(dev)) {
+ xennet_alloc_rx_buffers(queue);
+ queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
+ if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
+ napi_schedule(&queue->napi);
+ }
+ spin_unlock_bh(&queue->rx_lock);
}
- spin_unlock_bh(&np->rx_lock);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
-static void xennet_tx_buf_gc(struct net_device *dev)
+static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
RING_IDX cons, prod;
unsigned short id;
- struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
- BUG_ON(!netif_carrier_ok(dev));
+ BUG_ON(!netif_carrier_ok(queue->info->netdev));
do {
- prod = np->tx.sring->rsp_prod;
+ prod = queue->tx.sring->rsp_prod;
rmb(); /* Ensure we see responses up to 'rp'. */
- for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+ for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
struct xen_netif_tx_response *txrsp;
- txrsp = RING_GET_RESPONSE(&np->tx, cons);
+ txrsp = RING_GET_RESPONSE(&queue->tx, cons);
if (txrsp->status == XEN_NETIF_RSP_NULL)
continue;
id = txrsp->id;
- skb = np->tx_skbs[id].skb;
+ skb = queue->tx_skbs[id].skb;
if (unlikely(gnttab_query_foreign_access(
- np->grant_tx_ref[id]) != 0)) {
+ queue->grant_tx_ref[id]) != 0)) {
pr_alert("%s: warning -- grant still in use by backend domain\n",
__func__);
BUG();
}
gnttab_end_foreign_access_ref(
- np->grant_tx_ref[id], GNTMAP_readonly);
+ queue->grant_tx_ref[id], GNTMAP_readonly);
gnttab_release_grant_reference(
- &np->gref_tx_head, np->grant_tx_ref[id]);
- np->grant_tx_ref[id] = GRANT_INVALID_REF;
- np->grant_tx_page[id] = NULL;
- add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+ &queue->gref_tx_head, queue->grant_tx_ref[id]);
+ queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_tx_page[id] = NULL;
+ add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
dev_kfree_skb_irq(skb);
}
- np->tx.rsp_cons = prod;
+ queue->tx.rsp_cons = prod;
/*
* Set a new event, then check for race with update of tx_cons.
@@ -412,21 +440,20 @@ static void xennet_tx_buf_gc(struct net_device *dev)
* data is outstanding: in such cases notification from Xen is
* likely to be the only kick that we'll get.
*/
- np->tx.sring->rsp_event =
- prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+ queue->tx.sring->rsp_event =
+ prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
mb(); /* update shared area */
- } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+ } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
- xennet_maybe_wake_tx(dev);
+ xennet_maybe_wake_tx(queue);
}
-static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
struct xen_netif_tx_request *tx)
{
- struct netfront_info *np = netdev_priv(dev);
char *data = skb->data;
unsigned long mfn;
- RING_IDX prod = np->tx.req_prod_pvt;
+ RING_IDX prod = queue->tx.req_prod_pvt;
int frags = skb_shinfo(skb)->nr_frags;
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
@@ -443,19 +470,19 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
data += tx->size;
offset = 0;
- id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
- np->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&np->tx, prod++);
+ id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ queue->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&queue->tx, prod++);
tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(data);
- gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
mfn, GNTMAP_readonly);
- np->grant_tx_page[id] = virt_to_page(data);
- tx->gref = np->grant_tx_ref[id] = ref;
+ queue->grant_tx_page[id] = virt_to_page(data);
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
tx->flags = 0;
@@ -487,21 +514,21 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
tx->flags |= XEN_NETTXF_more_data;
- id = get_id_from_freelist(&np->tx_skb_freelist,
- np->tx_skbs);
- np->tx_skbs[id].skb = skb_get(skb);
- tx = RING_GET_REQUEST(&np->tx, prod++);
+ id = get_id_from_freelist(&queue->tx_skb_freelist,
+ queue->tx_skbs);
+ queue->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&queue->tx, prod++);
tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = pfn_to_mfn(page_to_pfn(page));
gnttab_grant_foreign_access_ref(ref,
- np->xbdev->otherend_id,
+ queue->info->xbdev->otherend_id,
mfn, GNTMAP_readonly);
- np->grant_tx_page[id] = page;
- tx->gref = np->grant_tx_ref[id] = ref;
+ queue->grant_tx_page[id] = page;
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = bytes;
tx->flags = 0;
@@ -518,7 +545,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
}
}
- np->tx.req_prod_pvt = prod;
+ queue->tx.req_prod_pvt = prod;
}
/*
@@ -544,6 +571,24 @@ static int xennet_count_skb_frag_slots(struct sk_buff *skb)
return pages;
}
+static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ unsigned int num_queues = dev->real_num_tx_queues;
+ u32 hash;
+ u16 queue_idx;
+
+ /* First, check if there is only one queue */
+ if (num_queues == 1) {
+ queue_idx = 0;
+ } else {
+ hash = skb_get_hash(skb);
+ queue_idx = hash % num_queues;
+ }
+
+ return queue_idx;
+}
+
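xennet_select_queue() steers each flow to a queue by reducing the skb hash modulo the queue count, so all packets of one flow stay on one queue and ordering is preserved. A self-contained sketch of the same selection; flow_hash() is a stand-in for skb_get_hash(), not the kernel's hash:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for skb_get_hash(): any uniform flow hash would do */
static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport);

	h ^= h >> 16;
	h *= 0x45d9f3b;
	h ^= h >> 16;
	return h;
}

static uint16_t select_queue(uint32_t hash, unsigned int num_queues)
{
	return num_queues == 1 ? 0 : hash % num_queues;
}

int main(void)
{
	unsigned int num_queues = 4;
	uint32_t h = flow_hash(0x0a000001, 0x0a000002, 12345, 80);

	/* The same 4-tuple always lands on the same queue */
	printf("flow -> queue %u of %u\n",
	       (unsigned int)select_queue(h, num_queues), num_queues);
	return 0;
}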
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
@@ -559,6 +604,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
unsigned long flags;
+ struct netfront_queue *queue = NULL;
+ unsigned int num_queues = dev->real_num_tx_queues;
+ u16 queue_index;
+
+ /* Drop the packet if no queues are set up */
+ if (num_queues < 1)
+ goto drop;
+ /* Determine which queue to transmit this SKB on */
+ queue_index = skb_get_queue_mapping(skb);
+ queue = &np->queues[queue_index];
/* If skb->len is too big for wire format, drop skb and alert
* user about misconfiguration.
@@ -578,30 +633,30 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
- spin_lock_irqsave(&np->tx_lock, flags);
+ spin_lock_irqsave(&queue->tx_lock, flags);
if (unlikely(!netif_carrier_ok(dev) ||
(slots > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(skb, netif_skb_features(skb)))) {
- spin_unlock_irqrestore(&np->tx_lock, flags);
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
goto drop;
}
- i = np->tx.req_prod_pvt;
+ i = queue->tx.req_prod_pvt;
- id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
- np->tx_skbs[id].skb = skb;
+ id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ queue->tx_skbs[id].skb = skb;
- tx = RING_GET_REQUEST(&np->tx, i);
+ tx = RING_GET_REQUEST(&queue->tx, i);
tx->id = id;
- ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
- ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
- np->grant_tx_page[id] = virt_to_page(data);
- tx->gref = np->grant_tx_ref[id] = ref;
+ ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ queue->grant_tx_page[id] = virt_to_page(data);
+ tx->gref = queue->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -617,7 +672,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct xen_netif_extra_info *gso;
gso = (struct xen_netif_extra_info *)
- RING_GET_REQUEST(&np->tx, ++i);
+ RING_GET_REQUEST(&queue->tx, ++i);
tx->flags |= XEN_NETTXF_extra_info;
@@ -632,14 +687,14 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso->flags = 0;
}
- np->tx.req_prod_pvt = i + 1;
+ queue->tx.req_prod_pvt = i + 1;
- xennet_make_frags(skb, dev, tx);
+ xennet_make_frags(skb, queue, tx);
tx->size = skb->len;
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
- notify_remote_via_irq(np->tx_irq);
+ notify_remote_via_irq(queue->tx_irq);
u64_stats_update_begin(&stats->syncp);
stats->tx_bytes += skb->len;
@@ -647,12 +702,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_end(&stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
- xennet_tx_buf_gc(dev);
+ xennet_tx_buf_gc(queue);
- if (!netfront_tx_slot_available(np))
- netif_stop_queue(dev);
+ if (!netfront_tx_slot_available(queue))
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
- spin_unlock_irqrestore(&np->tx_lock, flags);
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
return NETDEV_TX_OK;
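The stats updates above go to per-CPU netfront_stats guarded by u64_stats_sync, so transmit paths on different CPUs never contend with each other and xennet_get_stats64() sums all per-CPU copies at read time. A simplified userspace analogue with one counter set per thread and relaxed atomics in place of the seqcount; this sketches the aggregation idea, not the kernel mechanism:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

/* One counter set per thread, so writers never contend */
struct stats {
	atomic_ulong tx_packets;
	atomic_ulong tx_bytes;
};

static struct stats per_thread[NTHREADS];

static void *worker(void *arg)
{
	struct stats *s = &per_thread[(long)arg];
	int i;

	for (i = 0; i < 100000; i++) {
		atomic_fetch_add_explicit(&s->tx_packets, 1,
					  memory_order_relaxed);
		atomic_fetch_add_explicit(&s->tx_bytes, 64,
					  memory_order_relaxed);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	unsigned long packets = 0, bytes = 0;
	long i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);

	/* The reader aggregates every per-thread copy, the way
	 * xennet_get_stats64() walks the per-CPU stats.
	 */
	for (i = 0; i < NTHREADS; i++) {
		packets += per_thread[i].tx_packets;
		bytes += per_thread[i].tx_bytes;
	}
	printf("%lu packets, %lu bytes\n", packets, bytes);
	return 0;
}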
@@ -665,32 +720,38 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int xennet_close(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
- netif_stop_queue(np->netdev);
- napi_disable(&np->napi);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i;
+ struct netfront_queue *queue;
+ netif_tx_stop_all_queues(np->netdev);
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ napi_disable(&queue->napi);
+ }
return 0;
}
-static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
grant_ref_t ref)
{
- int new = xennet_rxidx(np->rx.req_prod_pvt);
-
- BUG_ON(np->rx_skbs[new]);
- np->rx_skbs[new] = skb;
- np->grant_rx_ref[new] = ref;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
- RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
- np->rx.req_prod_pvt++;
+ int new = xennet_rxidx(queue->rx.req_prod_pvt);
+
+ BUG_ON(queue->rx_skbs[new]);
+ queue->rx_skbs[new] = skb;
+ queue->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
+ queue->rx.req_prod_pvt++;
}
-static int xennet_get_extras(struct netfront_info *np,
+static int xennet_get_extras(struct netfront_queue *queue,
struct xen_netif_extra_info *extras,
RING_IDX rp)
{
struct xen_netif_extra_info *extra;
- struct device *dev = &np->netdev->dev;
- RING_IDX cons = np->rx.rsp_cons;
+ struct device *dev = &queue->info->netdev->dev;
+ RING_IDX cons = queue->rx.rsp_cons;
int err = 0;
do {
@@ -705,7 +766,7 @@ static int xennet_get_extras(struct netfront_info *np,
}
extra = (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&np->rx, ++cons);
+ RING_GET_RESPONSE(&queue->rx, ++cons);
if (unlikely(!extra->type ||
extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
@@ -718,33 +779,33 @@ static int xennet_get_extras(struct netfront_info *np,
sizeof(*extra));
}
- skb = xennet_get_rx_skb(np, cons);
- ref = xennet_get_rx_ref(np, cons);
- xennet_move_rx_slot(np, skb, ref);
+ skb = xennet_get_rx_skb(queue, cons);
+ ref = xennet_get_rx_ref(queue, cons);
+ xennet_move_rx_slot(queue, skb, ref);
} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
- np->rx.rsp_cons = cons;
+ queue->rx.rsp_cons = cons;
return err;
}
-static int xennet_get_responses(struct netfront_info *np,
+static int xennet_get_responses(struct netfront_queue *queue,
struct netfront_rx_info *rinfo, RING_IDX rp,
struct sk_buff_head *list)
{
struct xen_netif_rx_response *rx = &rinfo->rx;
struct xen_netif_extra_info *extras = rinfo->extras;
- struct device *dev = &np->netdev->dev;
- RING_IDX cons = np->rx.rsp_cons;
- struct sk_buff *skb = xennet_get_rx_skb(np, cons);
- grant_ref_t ref = xennet_get_rx_ref(np, cons);
+ struct device *dev = &queue->info->netdev->dev;
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
+ grant_ref_t ref = xennet_get_rx_ref(queue, cons);
int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
int slots = 1;
int err = 0;
unsigned long ret;
if (rx->flags & XEN_NETRXF_extra_info) {
- err = xennet_get_extras(np, extras, rp);
- cons = np->rx.rsp_cons;
+ err = xennet_get_extras(queue, extras, rp);
+ cons = queue->rx.rsp_cons;
}
for (;;) {
@@ -753,7 +814,7 @@ static int xennet_get_responses(struct netfront_info *np,
if (net_ratelimit())
dev_warn(dev, "rx->offset: %x, size: %u\n",
rx->offset, rx->status);
- xennet_move_rx_slot(np, skb, ref);
+ xennet_move_rx_slot(queue, skb, ref);
err = -EINVAL;
goto next;
}
@@ -774,7 +835,7 @@ static int xennet_get_responses(struct netfront_info *np,
ret = gnttab_end_foreign_access_ref(ref, 0);
BUG_ON(!ret);
- gnttab_release_grant_reference(&np->gref_rx_head, ref);
+ gnttab_release_grant_reference(&queue->gref_rx_head, ref);
__skb_queue_tail(list, skb);
@@ -789,9 +850,9 @@ next:
break;
}
- rx = RING_GET_RESPONSE(&np->rx, cons + slots);
- skb = xennet_get_rx_skb(np, cons + slots);
- ref = xennet_get_rx_ref(np, cons + slots);
+ rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+ skb = xennet_get_rx_skb(queue, cons + slots);
+ ref = xennet_get_rx_ref(queue, cons + slots);
slots++;
}
@@ -802,7 +863,7 @@ next:
}
if (unlikely(err))
- np->rx.rsp_cons = cons + slots;
+ queue->rx.rsp_cons = cons + slots;
return err;
}
@@ -836,17 +897,17 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return 0;
}
-static RING_IDX xennet_fill_frags(struct netfront_info *np,
+static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
- RING_IDX cons = np->rx.rsp_cons;
+ RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
while ((nskb = __skb_dequeue(list))) {
struct xen_netif_rx_response *rx =
- RING_GET_RESPONSE(&np->rx, ++cons);
+ RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
@@ -879,7 +940,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
struct netfront_info *np = netdev_priv(dev);
- np->rx_gso_checksum_fixup++;
+ atomic_inc(&np->rx_gso_checksum_fixup);
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
@@ -891,11 +952,10 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
return skb_checksum_setup(skb, recalculate_partial_csum);
}
-static int handle_incoming_queue(struct net_device *dev,
+static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq)
{
- struct netfront_info *np = netdev_priv(dev);
- struct netfront_stats *stats = this_cpu_ptr(np->stats);
+ struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -906,13 +966,13 @@ static int handle_incoming_queue(struct net_device *dev,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
/* Ethernet work: Delayed to here as it peeks the header. */
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, queue->info->netdev);
skb_reset_network_header(skb);
- if (checksum_setup(dev, skb)) {
+ if (checksum_setup(queue->info->netdev, skb)) {
kfree_skb(skb);
packets_dropped++;
- dev->stats.rx_errors++;
+ queue->info->netdev->stats.rx_errors++;
continue;
}
@@ -922,7 +982,7 @@ static int handle_incoming_queue(struct net_device *dev,
u64_stats_update_end(&stats->syncp);
/* Pass it up. */
- napi_gro_receive(&np->napi, skb);
+ napi_gro_receive(&queue->napi, skb);
}
return packets_dropped;
@@ -930,8 +990,8 @@ static int handle_incoming_queue(struct net_device *dev,
static int xennet_poll(struct napi_struct *napi, int budget)
{
- struct netfront_info *np = container_of(napi, struct netfront_info, napi);
- struct net_device *dev = np->netdev;
+ struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
+ struct net_device *dev = queue->info->netdev;
struct sk_buff *skb;
struct netfront_rx_info rinfo;
struct xen_netif_rx_response *rx = &rinfo.rx;
@@ -944,29 +1004,29 @@ static int xennet_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int err;
- spin_lock(&np->rx_lock);
+ spin_lock(&queue->rx_lock);
skb_queue_head_init(&rxq);
skb_queue_head_init(&errq);
skb_queue_head_init(&tmpq);
- rp = np->rx.sring->rsp_prod;
+ rp = queue->rx.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- i = np->rx.rsp_cons;
+ i = queue->rx.rsp_cons;
work_done = 0;
while ((i != rp) && (work_done < budget)) {
- memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
memset(extras, 0, sizeof(rinfo.extras));
- err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+ err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
if (unlikely(err)) {
err:
while ((skb = __skb_dequeue(&tmpq)))
__skb_queue_tail(&errq, skb);
dev->stats.rx_errors++;
- i = np->rx.rsp_cons;
+ i = queue->rx.rsp_cons;
continue;
}
@@ -978,7 +1038,7 @@ err:
if (unlikely(xennet_set_skb_gso(skb, gso))) {
__skb_queue_head(&tmpq, skb);
- np->rx.rsp_cons += skb_queue_len(&tmpq);
+ queue->rx.rsp_cons += skb_queue_len(&tmpq);
goto err;
}
}
@@ -992,7 +1052,7 @@ err:
skb->data_len = rx->status;
skb->len += rx->status;
- i = xennet_fill_frags(np, skb, &tmpq);
+ i = xennet_fill_frags(queue, skb, &tmpq);
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1001,22 +1061,22 @@ err:
__skb_queue_tail(&rxq, skb);
- np->rx.rsp_cons = ++i;
+ queue->rx.rsp_cons = ++i;
work_done++;
}
__skb_queue_purge(&errq);
- work_done -= handle_incoming_queue(dev, &rxq);
+ work_done -= handle_incoming_queue(queue, &rxq);
/* If we get a callback with very few responses, reduce fill target. */
/* NB. Note exponential increase, linear decrease. */
- if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
- ((3*np->rx_target) / 4)) &&
- (--np->rx_target < np->rx_min_target))
- np->rx_target = np->rx_min_target;
+ if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
+ ((3*queue->rx_target) / 4)) &&
+ (--queue->rx_target < queue->rx_min_target))
+ queue->rx_target = queue->rx_min_target;
- xennet_alloc_rx_buffers(dev);
+ xennet_alloc_rx_buffers(queue);
if (work_done < budget) {
int more_to_do = 0;
@@ -1025,14 +1085,14 @@ err:
local_irq_save(flags);
- RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+ RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
if (!more_to_do)
__napi_complete(napi);
local_irq_restore(flags);
}
- spin_unlock(&np->rx_lock);
+ spin_unlock(&queue->rx_lock);
return work_done;
}
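The fill-target handling split between xennet_alloc_rx_buffers() and the poll loop above is an increase-fast/decrease-slow policy: rx_target doubles when the ring nearly ran out of buffers and is decremented while responses keep arriving with plenty in flight, clamped to [rx_min_target, rx_max_target]. The policy in isolation, with stand-in constants:

#include <stdio.h>

#define RX_MIN_TARGET 8
#define RX_MAX_TARGET 256

/* Exponential increase when we risked running out of rx buffers... */
static int grow(int target)
{
	target *= 2;
	return target > RX_MAX_TARGET ? RX_MAX_TARGET : target;
}

/* ...linear decrease while the ring stays comfortably over-filled */
static int shrink(int target)
{
	return target - 1 < RX_MIN_TARGET ? RX_MIN_TARGET : target - 1;
}

int main(void)
{
	int target = RX_MIN_TARGET;
	int i;

	for (i = 0; i < 6; i++)
		printf("grow:   %d\n", target = grow(target));
	for (i = 0; i < 3; i++)
		printf("shrink: %d\n", target = shrink(target));
	return 0;
}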
@@ -1080,43 +1140,43 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
return tot;
}
-static void xennet_release_tx_bufs(struct netfront_info *np)
+static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
struct sk_buff *skb;
int i;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
/* Skip over entries which are actually freelist references */
- if (skb_entry_is_link(&np->tx_skbs[i]))
+ if (skb_entry_is_link(&queue->tx_skbs[i]))
continue;
- skb = np->tx_skbs[i].skb;
- get_page(np->grant_tx_page[i]);
- gnttab_end_foreign_access(np->grant_tx_ref[i],
+ skb = queue->tx_skbs[i].skb;
+ get_page(queue->grant_tx_page[i]);
+ gnttab_end_foreign_access(queue->grant_tx_ref[i],
GNTMAP_readonly,
- (unsigned long)page_address(np->grant_tx_page[i]));
- np->grant_tx_page[i] = NULL;
- np->grant_tx_ref[i] = GRANT_INVALID_REF;
- add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+ (unsigned long)page_address(queue->grant_tx_page[i]));
+ queue->grant_tx_page[i] = NULL;
+ queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
dev_kfree_skb_irq(skb);
}
}
-static void xennet_release_rx_bufs(struct netfront_info *np)
+static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
int id, ref;
- spin_lock_bh(&np->rx_lock);
+ spin_lock_bh(&queue->rx_lock);
for (id = 0; id < NET_RX_RING_SIZE; id++) {
struct sk_buff *skb;
struct page *page;
- skb = np->rx_skbs[id];
+ skb = queue->rx_skbs[id];
if (!skb)
continue;
- ref = np->grant_rx_ref[id];
+ ref = queue->grant_rx_ref[id];
if (ref == GRANT_INVALID_REF)
continue;
@@ -1128,21 +1188,28 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
get_page(page);
gnttab_end_foreign_access(ref, 0,
(unsigned long)page_address(page));
- np->grant_rx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[id] = GRANT_INVALID_REF;
kfree_skb(skb);
}
- spin_unlock_bh(&np->rx_lock);
+ spin_unlock_bh(&queue->rx_lock);
}
static void xennet_uninit(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
- xennet_release_tx_bufs(np);
- xennet_release_rx_bufs(np);
- gnttab_free_grant_references(np->gref_tx_head);
- gnttab_free_grant_references(np->gref_rx_head);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ struct netfront_queue *queue;
+ unsigned int i;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ xennet_release_tx_bufs(queue);
+ xennet_release_rx_bufs(queue);
+ gnttab_free_grant_references(queue->gref_tx_head);
+ gnttab_free_grant_references(queue->gref_rx_head);
+ }
}
static netdev_features_t xennet_fix_features(struct net_device *dev,
@@ -1203,25 +1270,24 @@ static int xennet_set_features(struct net_device *dev,
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
- struct netfront_info *np = dev_id;
- struct net_device *dev = np->netdev;
+ struct netfront_queue *queue = dev_id;
unsigned long flags;
- spin_lock_irqsave(&np->tx_lock, flags);
- xennet_tx_buf_gc(dev);
- spin_unlock_irqrestore(&np->tx_lock, flags);
+ spin_lock_irqsave(&queue->tx_lock, flags);
+ xennet_tx_buf_gc(queue);
+ spin_unlock_irqrestore(&queue->tx_lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
- struct netfront_info *np = dev_id;
- struct net_device *dev = np->netdev;
+ struct netfront_queue *queue = dev_id;
+ struct net_device *dev = queue->info->netdev;
if (likely(netif_carrier_ok(dev) &&
- RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
- napi_schedule(&np->napi);
+ RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+ napi_schedule(&queue->napi);
return IRQ_HANDLED;
}
@@ -1236,7 +1302,12 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
- xennet_interrupt(0, dev);
+ /* Poll each queue */
+ struct netfront_info *info = netdev_priv(dev);
+ unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int i;
+ for (i = 0; i < num_queues; ++i)
+ xennet_interrupt(0, &info->queues[i]);
}
#endif
@@ -1251,6 +1322,7 @@ static const struct net_device_ops xennet_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = xennet_fix_features,
.ndo_set_features = xennet_set_features,
+ .ndo_select_queue = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xennet_poll_controller,
#endif
@@ -1258,66 +1330,30 @@ static const struct net_device_ops xennet_netdev_ops = {
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
- int i, err;
+ int err;
struct net_device *netdev;
struct netfront_info *np;
- netdev = alloc_etherdev(sizeof(struct netfront_info));
+ netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
if (!netdev)
return ERR_PTR(-ENOMEM);
np = netdev_priv(netdev);
np->xbdev = dev;
- spin_lock_init(&np->tx_lock);
- spin_lock_init(&np->rx_lock);
-
- skb_queue_head_init(&np->rx_batch);
- np->rx_target = RX_DFL_MIN_TARGET;
- np->rx_min_target = RX_DFL_MIN_TARGET;
- np->rx_max_target = RX_MAX_TARGET;
-
- init_timer(&np->rx_refill_timer);
- np->rx_refill_timer.data = (unsigned long)netdev;
- np->rx_refill_timer.function = rx_refill_timeout;
+ /* No need to use rtnl_lock() before the call below as it
+ * happens before register_netdev().
+ */
+ netif_set_real_num_tx_queues(netdev, 0);
+ np->queues = NULL;
err = -ENOMEM;
np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL)
goto exit;
- /* Initialise tx_skbs as a free chain containing every entry. */
- np->tx_skb_freelist = 0;
- for (i = 0; i < NET_TX_RING_SIZE; i++) {
- skb_entry_set_link(&np->tx_skbs[i], i+1);
- np->grant_tx_ref[i] = GRANT_INVALID_REF;
- np->grant_tx_page[i] = NULL;
- }
-
- /* Clear out rx_skbs */
- for (i = 0; i < NET_RX_RING_SIZE; i++) {
- np->rx_skbs[i] = NULL;
- np->grant_rx_ref[i] = GRANT_INVALID_REF;
- }
-
- /* A grant for every tx ring slot */
- if (gnttab_alloc_grant_references(TX_MAX_TARGET,
- &np->gref_tx_head) < 0) {
- pr_alert("can't alloc tx grant refs\n");
- err = -ENOMEM;
- goto exit_free_stats;
- }
- /* A grant for every rx ring slot */
- if (gnttab_alloc_grant_references(RX_MAX_TARGET,
- &np->gref_rx_head) < 0) {
- pr_alert("can't alloc rx grant refs\n");
- err = -ENOMEM;
- goto exit_free_tx;
- }
-
netdev->netdev_ops = &xennet_netdev_ops;
- netif_napi_add(netdev, &np->napi, xennet_poll, 64);
netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_GSO_ROBUST;
netdev->hw_features = NETIF_F_SG |
@@ -1332,7 +1368,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
*/
netdev->features |= netdev->hw_features;
- SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ netdev->ethtool_ops = &xennet_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
@@ -1343,10 +1379,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev;
- exit_free_tx:
- gnttab_free_grant_references(np->gref_tx_head);
- exit_free_stats:
- free_percpu(np->stats);
exit:
free_netdev(netdev);
return ERR_PTR(err);
@@ -1404,30 +1436,36 @@ static void xennet_end_access(int ref, void *page)
static void xennet_disconnect_backend(struct netfront_info *info)
{
- /* Stop old i/f to prevent errors whilst we rebuild the state. */
- spin_lock_bh(&info->rx_lock);
- spin_lock_irq(&info->tx_lock);
- netif_carrier_off(info->netdev);
- spin_unlock_irq(&info->tx_lock);
- spin_unlock_bh(&info->rx_lock);
-
- if (info->tx_irq && (info->tx_irq == info->rx_irq))
- unbind_from_irqhandler(info->tx_irq, info);
- if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
- unbind_from_irqhandler(info->tx_irq, info);
- unbind_from_irqhandler(info->rx_irq, info);
- }
- info->tx_evtchn = info->rx_evtchn = 0;
- info->tx_irq = info->rx_irq = 0;
+ unsigned int i = 0;
+ struct netfront_queue *queue = NULL;
+ unsigned int num_queues = info->netdev->real_num_tx_queues;
+
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_bh(&queue->rx_lock);
+ spin_lock_irq(&queue->tx_lock);
+ netif_carrier_off(queue->info->netdev);
+ spin_unlock_irq(&queue->tx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+
+ if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ unbind_from_irqhandler(queue->rx_irq, queue);
+ }
+ queue->tx_evtchn = queue->rx_evtchn = 0;
+ queue->tx_irq = queue->rx_irq = 0;
- /* End access and free the pages */
- xennet_end_access(info->tx_ring_ref, info->tx.sring);
- xennet_end_access(info->rx_ring_ref, info->rx.sring);
+ /* End access and free the pages */
+ xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
+ xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
- info->tx_ring_ref = GRANT_INVALID_REF;
- info->rx_ring_ref = GRANT_INVALID_REF;
- info->tx.sring = NULL;
- info->rx.sring = NULL;
+ queue->tx_ring_ref = GRANT_INVALID_REF;
+ queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx.sring = NULL;
+ queue->rx.sring = NULL;
+ }
}
/**
@@ -1468,100 +1506,86 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
return 0;
}
-static int setup_netfront_single(struct netfront_info *info)
+static int setup_netfront_single(struct netfront_queue *queue)
{
int err;
- err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+ err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
if (err < 0)
goto fail;
- err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+ err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
xennet_interrupt,
- 0, info->netdev->name, info);
+ 0, queue->info->netdev->name, queue);
if (err < 0)
goto bind_fail;
- info->rx_evtchn = info->tx_evtchn;
- info->rx_irq = info->tx_irq = err;
+ queue->rx_evtchn = queue->tx_evtchn;
+ queue->rx_irq = queue->tx_irq = err;
return 0;
bind_fail:
- xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
- info->tx_evtchn = 0;
+ xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+ queue->tx_evtchn = 0;
fail:
return err;
}
-static int setup_netfront_split(struct netfront_info *info)
+static int setup_netfront_split(struct netfront_queue *queue)
{
int err;
- err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
+ err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
if (err < 0)
goto fail;
- err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
+ err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
if (err < 0)
goto alloc_rx_evtchn_fail;
- snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
- "%s-tx", info->netdev->name);
- err = bind_evtchn_to_irqhandler(info->tx_evtchn,
+ snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
+ "%s-tx", queue->name);
+ err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
xennet_tx_interrupt,
- 0, info->tx_irq_name, info);
+ 0, queue->tx_irq_name, queue);
if (err < 0)
goto bind_tx_fail;
- info->tx_irq = err;
+ queue->tx_irq = err;
- snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
- "%s-rx", info->netdev->name);
- err = bind_evtchn_to_irqhandler(info->rx_evtchn,
+ snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
+ "%s-rx", queue->name);
+ err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
xennet_rx_interrupt,
- 0, info->rx_irq_name, info);
+ 0, queue->rx_irq_name, queue);
if (err < 0)
goto bind_rx_fail;
- info->rx_irq = err;
+ queue->rx_irq = err;
return 0;
bind_rx_fail:
- unbind_from_irqhandler(info->tx_irq, info);
- info->tx_irq = 0;
+ unbind_from_irqhandler(queue->tx_irq, queue);
+ queue->tx_irq = 0;
bind_tx_fail:
- xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
- info->rx_evtchn = 0;
+ xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
+ queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
- xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
- info->tx_evtchn = 0;
+ xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
+ queue->tx_evtchn = 0;
fail:
return err;
}
-static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+static int setup_netfront(struct xenbus_device *dev,
+ struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err;
- struct net_device *netdev = info->netdev;
- unsigned int feature_split_evtchn;
- info->tx_ring_ref = GRANT_INVALID_REF;
- info->rx_ring_ref = GRANT_INVALID_REF;
- info->rx.sring = NULL;
- info->tx.sring = NULL;
- netdev->irq = 0;
-
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "feature-split-event-channels", "%u",
- &feature_split_evtchn);
- if (err < 0)
- feature_split_evtchn = 0;
-
- err = xen_net_read_mac(dev, netdev->dev_addr);
- if (err) {
- xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
- goto fail;
- }
+ queue->tx_ring_ref = GRANT_INVALID_REF;
+ queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->rx.sring = NULL;
+ queue->tx.sring = NULL;
txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!txs) {
@@ -1570,13 +1594,13 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
goto fail;
}
SHARED_RING_INIT(txs);
- FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+ FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
err = xenbus_grant_ring(dev, virt_to_mfn(txs));
if (err < 0)
goto grant_tx_ring_fail;
+ queue->tx_ring_ref = err;
- info->tx_ring_ref = err;
rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
if (!rxs) {
err = -ENOMEM;
@@ -1584,21 +1608,21 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
goto alloc_rx_ring_fail;
}
SHARED_RING_INIT(rxs);
- FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+ FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
if (err < 0)
goto grant_rx_ring_fail;
- info->rx_ring_ref = err;
+ queue->rx_ring_ref = err;
if (feature_split_evtchn)
- err = setup_netfront_split(info);
+ err = setup_netfront_split(queue);
/* setup single event channel if
* a) feature-split-event-channels == 0
* b) feature-split-event-channels == 1 but failed to setup
*/
if (!feature_split_evtchn || (feature_split_evtchn && err))
- err = setup_netfront_single(info);
+ err = setup_netfront_single(queue);
if (err)
goto alloc_evtchn_fail;
@@ -1609,17 +1633,163 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
* granted pages because backend is not accessing it at this point.
*/
alloc_evtchn_fail:
- gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
+ gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
free_page((unsigned long)rxs);
alloc_rx_ring_fail:
- gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
+ gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
free_page((unsigned long)txs);
fail:
return err;
}
+/* Queue-specific initialisation
+ * This used to be done in xennet_create_dev() but must now
+ * be run per-queue.
+ */
+static int xennet_init_queue(struct netfront_queue *queue)
+{
+ unsigned short i;
+ int err = 0;
+
+ spin_lock_init(&queue->tx_lock);
+ spin_lock_init(&queue->rx_lock);
+
+ skb_queue_head_init(&queue->rx_batch);
+ queue->rx_target = RX_DFL_MIN_TARGET;
+ queue->rx_min_target = RX_DFL_MIN_TARGET;
+ queue->rx_max_target = RX_MAX_TARGET;
+
+ init_timer(&queue->rx_refill_timer);
+ queue->rx_refill_timer.data = (unsigned long)queue;
+ queue->rx_refill_timer.function = rx_refill_timeout;
+
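+ /* Per-queue name, e.g. "eth0-q0"; also used by setup_netfront_split()
+ * to build the per-queue IRQ names.
+ */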
+ snprintf(queue->name, sizeof(queue->name), "%s-q%u",
+ queue->info->netdev->name, queue->id);
+
+ /* Initialise tx_skbs as a free chain containing every entry. */
+ queue->tx_skb_freelist = 0;
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ skb_entry_set_link(&queue->tx_skbs[i], i+1);
+ queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_page[i] = NULL;
+ }
+
+ /* Clear out rx_skbs */
+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
+ queue->rx_skbs[i] = NULL;
+ queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* A grant for every tx ring slot */
+ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ &queue->gref_tx_head) < 0) {
+ pr_alert("can't alloc tx grant refs\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* A grant for every rx ring slot */
+ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ &queue->gref_rx_head) < 0) {
+ pr_alert("can't alloc rx grant refs\n");
+ err = -ENOMEM;
+ goto exit_free_tx;
+ }
+
+ netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
+
+ return 0;
+
+ exit_free_tx:
+ gnttab_free_grant_references(queue->gref_tx_head);
+ exit:
+ return err;
+}
+
+static int write_queue_xenstore_keys(struct netfront_queue *queue,
+ struct xenbus_transaction *xbt, int write_hierarchical)
+{
+ /* Write the queue-specific keys into XenStore in the traditional
+ * way for a single queue, or under queue-specific subkeys when
+ * there are multiple queues.
+ */
+ struct xenbus_device *dev = queue->info->xbdev;
+ int err;
+ const char *message;
+ char *path;
+ size_t pathsize;
+
+ /* Choose the correct place to write the keys */
+ if (write_hierarchical) {
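+ /* Room for a "/queue-NN" suffix plus the NUL terminator;
+ * snprintf() below truncates anything longer.
+ */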
+ pathsize = strlen(dev->nodename) + 10;
+ path = kzalloc(pathsize, GFP_KERNEL);
+ if (!path) {
+ err = -ENOMEM;
+ message = "out of memory while writing ring references";
+ goto error;
+ }
+ snprintf(path, pathsize, "%s/queue-%u",
+ dev->nodename, queue->id);
+ } else {
+ path = (char *)dev->nodename;
+ }
+
+ /* Write ring references */
+ err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
+ queue->tx_ring_ref);
+ if (err) {
+ message = "writing tx-ring-ref";
+ goto error;
+ }
+
+ err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
+ queue->rx_ring_ref);
+ if (err) {
+ message = "writing rx-ring-ref";
+ goto error;
+ }
+
+ /* Write event channels, covering both the shared and the split
+ * event channel cases.
+ */
+ if (queue->tx_evtchn == queue->rx_evtchn) {
+ /* Shared event channel */
+ err = xenbus_printf(*xbt, path,
+ "event-channel", "%u", queue->tx_evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto error;
+ }
+ } else {
+ /* Split event channels */
+ err = xenbus_printf(*xbt, path,
+ "event-channel-tx", "%u", queue->tx_evtchn);
+ if (err) {
+ message = "writing event-channel-tx";
+ goto error;
+ }
+
+ err = xenbus_printf(*xbt, path,
+ "event-channel-rx", "%u", queue->rx_evtchn);
+ if (err) {
+ message = "writing event-channel-rx";
+ goto error;
+ }
+ }
+
+ if (write_hierarchical)
+ kfree(path);
+ return 0;
+
+error:
+ if (write_hierarchical)
+ kfree(path);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ return err;
+}
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
struct netfront_info *info)
@@ -1627,11 +1797,83 @@ static int talk_to_netback(struct xenbus_device *dev,
const char *message;
struct xenbus_transaction xbt;
int err;
+ unsigned int feature_split_evtchn;
+ unsigned int i = 0;
+ unsigned int max_queues = 0;
+ struct netfront_queue *queue = NULL;
+ unsigned int num_queues = 1;
- /* Create shared ring, alloc event channel. */
- err = setup_netfront(dev, info);
- if (err)
+ info->netdev->irq = 0;
+
+ /* Check if backend supports multiple queues */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "multi-queue-max-queues", "%u", &max_queues);
+ if (err < 0)
+ max_queues = 1;
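+ /* Honour both the backend's advertised maximum and our own
+ * xennet_max_queues limit.
+ */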
+ num_queues = min(max_queues, xennet_max_queues);
+
+ /* Check feature-split-event-channels */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-split-event-channels", "%u",
+ &feature_split_evtchn);
+ if (err < 0)
+ feature_split_evtchn = 0;
+
+ /* Read mac addr. */
+ err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+ goto out;
+ }
+
+ /* Allocate array of queues */
+ info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
+ if (!info->queues) {
+ err = -ENOMEM;
goto out;
+ }
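+
+ /* Tell the stack how many TX queues are now in use; the device
+ * may already be registered, so this is done under the RTNL lock.
+ */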
+ rtnl_lock();
+ netif_set_real_num_tx_queues(info->netdev, num_queues);
+ rtnl_unlock();
+
+ /* Create shared ring, alloc event channel -- for each queue */
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+ queue->id = i;
+ queue->info = info;
+ err = xennet_init_queue(queue);
+ if (err) {
+ /* xennet_init_queue() cleans up after itself on failure,
+ * but we still have to clean up any previously initialised
+ * queues. If i > 0, reduce real_num_tx_queues to i so that
+ * xennet_disconnect_backend(), reached via destroy_ring,
+ * only tears down the queues that were actually set up.
+ */
+ if (i > 0) {
+ rtnl_lock();
+ netif_set_real_num_tx_queues(info->netdev, i);
+ rtnl_unlock();
+ goto destroy_ring;
+ } else {
+ goto out;
+ }
+ }
+ err = setup_netfront(dev, queue, feature_split_evtchn);
+ if (err) {
+ /* As for xennet_init_queue(), setup_netfront() will tidy
+ * up the current queue on error, but we need to clean up
+ * those already allocated.
+ */
+ if (i > 0) {
+ rtnl_lock();
+ netif_set_real_num_tx_queues(info->netdev, i);
+ rtnl_unlock();
+ goto destroy_ring;
+ } else {
+ goto out;
+ }
+ }
+ }
again:
err = xenbus_transaction_start(&xbt);
@@ -1640,41 +1882,29 @@ again:
goto destroy_ring;
}
- err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
- info->tx_ring_ref);
- if (err) {
- message = "writing tx ring-ref";
- goto abort_transaction;
- }
- err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
- info->rx_ring_ref);
- if (err) {
- message = "writing rx ring-ref";
- goto abort_transaction;
- }
-
- if (info->tx_evtchn == info->rx_evtchn) {
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel", "%u", info->tx_evtchn);
- if (err) {
- message = "writing event-channel";
- goto abort_transaction;
- }
+ if (num_queues == 1) {
+ err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
+ if (err)
+ goto abort_transaction_no_dev_fatal;
} else {
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel-tx", "%u", info->tx_evtchn);
+ /* Write the number of queues */
+ err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
+ "%u", num_queues);
if (err) {
- message = "writing event-channel-tx";
- goto abort_transaction;
+ message = "writing multi-queue-num-queues";
+ goto abort_transaction;
}
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel-rx", "%u", info->rx_evtchn);
- if (err) {
- message = "writing event-channel-rx";
- goto abort_transaction;
+
+ /* Write the keys for each queue */
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+ err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
+ if (err)
+ goto abort_transaction_no_dev_fatal;
}
}
+ /* The remaining keys are not queue-specific */
err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1);
if (err) {
@@ -1724,10 +1954,16 @@ again:
return 0;
abort_transaction:
- xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, err, "%s", message);
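+/* Abort path for callers that issue xenbus_dev_fatal() themselves */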
+abort_transaction_no_dev_fatal:
+ xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
+ kfree(info->queues);
+ info->queues = NULL;
+ rtnl_lock();
+ netif_set_real_num_tx_queues(info->netdev, 0);
+ rtnl_unlock();
out:
return err;
}
@@ -1735,11 +1971,14 @@ again:
static int xennet_connect(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
+ unsigned int num_queues = 0;
int i, requeue_idx, err;
struct sk_buff *skb;
grant_ref_t ref;
struct xen_netif_rx_request *req;
unsigned int feature_rx_copy;
+ unsigned int j = 0;
+ struct netfront_queue *queue = NULL;
err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
"feature-rx-copy", "%u", &feature_rx_copy);
@@ -1756,40 +1995,47 @@ static int xennet_connect(struct net_device *dev)
if (err)
return err;
+ /* talk_to_netback() sets the correct number of queues */
+ num_queues = dev->real_num_tx_queues;
+
rtnl_lock();
netdev_update_features(dev);
rtnl_unlock();
- spin_lock_bh(&np->rx_lock);
- spin_lock_irq(&np->tx_lock);
+ /* By now, the queue structures have been set up */
+ for (j = 0; j < num_queues; ++j) {
+ queue = &np->queues[j];
+ spin_lock_bh(&queue->rx_lock);
+ spin_lock_irq(&queue->tx_lock);
- /* Step 1: Discard all pending TX packet fragments. */
- xennet_release_tx_bufs(np);
+ /* Step 1: Discard all pending TX packet fragments. */
+ xennet_release_tx_bufs(queue);
- /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
- for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
- skb_frag_t *frag;
- const struct page *page;
- if (!np->rx_skbs[i])
- continue;
+ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ skb_frag_t *frag;
+ const struct page *page;
+ if (!queue->rx_skbs[i])
+ continue;
- skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
- ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
- req = RING_GET_REQUEST(&np->rx, requeue_idx);
+ skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
+ ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
+ req = RING_GET_REQUEST(&queue->rx, requeue_idx);
- frag = &skb_shinfo(skb)->frags[0];
- page = skb_frag_page(frag);
- gnttab_grant_foreign_access_ref(
- ref, np->xbdev->otherend_id,
- pfn_to_mfn(page_to_pfn(page)),
- 0);
- req->gref = ref;
- req->id = requeue_idx;
+ frag = &skb_shinfo(skb)->frags[0];
+ page = skb_frag_page(frag);
+ gnttab_grant_foreign_access_ref(
+ ref, queue->info->xbdev->otherend_id,
+ pfn_to_mfn(page_to_pfn(page)),
+ 0);
+ req->gref = ref;
+ req->id = requeue_idx;
- requeue_idx++;
- }
+ requeue_idx++;
+ }
- np->rx.req_prod_pvt = requeue_idx;
+ queue->rx.req_prod_pvt = requeue_idx;
+ }
/*
* Step 3: All public and private state should now be sane. Get
@@ -1798,14 +2044,17 @@ static int xennet_connect(struct net_device *dev)
* packets.
*/
netif_carrier_on(np->netdev);
- notify_remote_via_irq(np->tx_irq);
- if (np->tx_irq != np->rx_irq)
- notify_remote_via_irq(np->rx_irq);
- xennet_tx_buf_gc(dev);
- xennet_alloc_rx_buffers(dev);
-
- spin_unlock_irq(&np->tx_lock);
- spin_unlock_bh(&np->rx_lock);
+ for (j = 0; j < num_queues; ++j) {
+ queue = &np->queues[j];
+ notify_remote_via_irq(queue->tx_irq);
+ if (queue->tx_irq != queue->rx_irq)
+ notify_remote_via_irq(queue->rx_irq);
+ xennet_tx_buf_gc(queue);
+ xennet_alloc_rx_buffers(queue);
+
+ spin_unlock_irq(&queue->tx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+ }
return 0;
}
@@ -1878,7 +2127,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev,
int i;
for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
- data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
+ data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -1909,8 +2158,12 @@ static ssize_t show_rxbuf_min(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *info = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
- return sprintf(buf, "%u\n", info->rx_min_target);
+ if (num_queues)
+ return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
+ else
+ return sprintf(buf, "%u\n", RX_MIN_TARGET);
}
static ssize_t store_rxbuf_min(struct device *dev,
@@ -1919,8 +2172,11 @@ static ssize_t store_rxbuf_min(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *np = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
char *endp;
unsigned long target;
+ unsigned int i;
+ struct netfront_queue *queue;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1934,16 +2190,19 @@ static ssize_t store_rxbuf_min(struct device *dev,
if (target > RX_MAX_TARGET)
target = RX_MAX_TARGET;
- spin_lock_bh(&np->rx_lock);
- if (target > np->rx_max_target)
- np->rx_max_target = target;
- np->rx_min_target = target;
- if (target > np->rx_target)
- np->rx_target = target;
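+ /* This tunable is device-wide, so apply the new value to every queue. */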
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ spin_lock_bh(&queue->rx_lock);
+ if (target > queue->rx_max_target)
+ queue->rx_max_target = target;
+ queue->rx_min_target = target;
+ if (target > queue->rx_target)
+ queue->rx_target = target;
- xennet_alloc_rx_buffers(netdev);
+ xennet_alloc_rx_buffers(queue);
- spin_unlock_bh(&np->rx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+ }
return len;
}
@@ -1952,8 +2211,12 @@ static ssize_t show_rxbuf_max(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *info = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
- return sprintf(buf, "%u\n", info->rx_max_target);
+ if (num_queues)
+ return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
+ else
+ return sprintf(buf, "%u\n", RX_MAX_TARGET);
}
static ssize_t store_rxbuf_max(struct device *dev,
@@ -1962,8 +2225,11 @@ static ssize_t store_rxbuf_max(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *np = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
char *endp;
unsigned long target;
+ unsigned int i = 0;
+ struct netfront_queue *queue = NULL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1977,16 +2243,19 @@ static ssize_t store_rxbuf_max(struct device *dev,
if (target > RX_MAX_TARGET)
target = RX_MAX_TARGET;
- spin_lock_bh(&np->rx_lock);
- if (target < np->rx_min_target)
- np->rx_min_target = target;
- np->rx_max_target = target;
- if (target < np->rx_target)
- np->rx_target = target;
+ for (i = 0; i < num_queues; ++i) {
+ queue = &np->queues[i];
+ spin_lock_bh(&queue->rx_lock);
+ if (target < queue->rx_min_target)
+ queue->rx_min_target = target;
+ queue->rx_max_target = target;
+ if (target < queue->rx_target)
+ queue->rx_target = target;
- xennet_alloc_rx_buffers(netdev);
+ xennet_alloc_rx_buffers(queue);
- spin_unlock_bh(&np->rx_lock);
+ spin_unlock_bh(&queue->rx_lock);
+ }
return len;
}
@@ -1995,8 +2264,12 @@ static ssize_t show_rxbuf_cur(struct device *dev,
{
struct net_device *netdev = to_net_dev(dev);
struct netfront_info *info = netdev_priv(netdev);
+ unsigned int num_queues = netdev->real_num_tx_queues;
- return sprintf(buf, "%u\n", info->rx_target);
+ if (num_queues)
+ return sprintf(buf, "%u\n", info->queues[0].rx_target);
+ else
+ return sprintf(buf, "0\n");
}
static struct device_attribute xennet_attrs[] = {
@@ -2043,6 +2316,9 @@ static const struct xenbus_device_id netfront_ids[] = {
static int xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
+ unsigned int num_queues = info->netdev->real_num_tx_queues;
+ struct netfront_queue *queue = NULL;
+ unsigned int i = 0;
dev_dbg(&dev->dev, "%s\n", dev->nodename);
@@ -2052,7 +2328,15 @@ static int xennet_remove(struct xenbus_device *dev)
unregister_netdev(info->netdev);
- del_timer_sync(&info->rx_refill_timer);
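+ /* del_timer_sync() ensures no rx_refill_timer handler is still
+ * running once the queues are freed.
+ */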
+ for (i = 0; i < num_queues; ++i) {
+ queue = &info->queues[i];
+ del_timer_sync(&queue->rx_refill_timer);
+ }
+
+ if (num_queues) {
+ kfree(info->queues);
+ info->queues = NULL;
+ }
free_percpu(info->stats);
@@ -2078,6 +2362,9 @@ static int __init netif_init(void)
pr_info("Initialising Xen virtual ethernet driver\n");
+ /* Allow as many queues as there are CPUs, by default */
+ xennet_max_queues = num_online_cpus();
+
return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);