Diffstat (limited to 'drivers/net/ethernet')
54 files changed, 0 insertions, 259 deletions
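Context for the removals below: this series relies on the companion change that folds mmiowb() into spin_unlock() on the architectures that need it (historically ia64/SN2 "Altix"), so drivers no longer issue the barrier by hand. A minimal sketch of the pattern being deleted — the names here (priv, MY_DOORBELL) are illustrative, not taken from any file in this diff:

	/* Before: the driver orders its MMIO doorbell write against the
	 * lock release by hand, so the write cannot "leak" past the unlock
	 * and race a doorbell from another CPU.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	writel(val, priv->base + MY_DOORBELL);
	mmiowb();		/* old: keep the MMIO write inside the critical section */
	spin_unlock_irqrestore(&priv->lock, flags);

	/* After: spin_unlock_irqrestore() itself provides that ordering on
	 * the architectures that need it, so the explicit call goes away.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	writel(val, priv->base + MY_DOORBELL);
	spin_unlock_irqrestore(&priv->lock, flags);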
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 47e5984f16fb..3155f7fa83eb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -613,7 +613,6 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
 		napi_schedule(&greth->napi);
 	}
-	mmiowb();
 	spin_unlock(&greth->devlock);
 	return retval;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 16477aa6d61f..4f7e792e50e9 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -345,8 +345,6 @@ static void slic_set_rx_mode(struct net_device *dev)
 	if (sdev->promisc != set_promisc) {
 		sdev->promisc = set_promisc;
 		slic_configure_rcv(sdev);
-		/* make sure writes to receiver cant leak out of the lock */
-		mmiowb();
 	}
 	spin_unlock_bh(&sdev->link_lock);
 }
@@ -1461,8 +1459,6 @@ static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
 		netif_stop_queue(dev);
-	/* make sure writes to io-memory cant leak out of tx queue lock */
-	mmiowb();
 	return NETDEV_TX_OK;
 drop_skb:
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index b17d435de09f..05798aa5bb73 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -2016,7 +2016,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 	mb();
 	writel_relaxed((u32)aenq->head,
 		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
-	mmiowb();
 }
 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 156fbc5601ca..f35c9a75be50 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2439,7 +2439,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 	atl1_tx_map(adapter, skb, ptpd);
 	atl1_tx_queue(adapter, count, ptpd);
 	atl1_update_mailbox(adapter);
-	mmiowb();
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 98da0fa27192..dd81c5863111 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -908,7 +908,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
 	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
 		(adapter->txd_write_ptr >> 2));
-	mmiowb();
 	dev_consume_skb_any(skb);
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d63371d70bce..dfdd14eadd57 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3305,8 +3305,6 @@ next_rx:
 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
-	mmiowb();
-
 	return rx_pkt;
 }
@@ -6723,8 +6721,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
-	mmiowb();
-
 	txr->tx_prod = prod;
 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecb1bd7eb508..0c8f5b546c6f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4166,8 +4166,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
-	mmiowb();
-
 	txdata->tx_bd_prod += nbd;
 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 2462e7aa0c5d..2d57af9c061c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -527,8 +527,6 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 		REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
 			       ((u32 *)&rx_prods)[i]);
-	mmiowb(); /* keep prod updates ordered */
-
 	DP(NETIF_MSG_RX_STATUS,
 	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
 	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
@@ -653,7 +651,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
 	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
 	/* Make sure that ACK is written */
-	mmiowb();
 	barrier();
 }
@@ -674,7 +671,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
 	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
 	/* Make sure that ACK is written */
-	mmiowb();
 	barrier();
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 749d0ef44371..0745cccd416d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2623,7 +2623,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	wmb();
 	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
-	mmiowb();
 	barrier();
 	num_pkts++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 626b491f7674..3716c828ff5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -869,9 +869,6 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
 	   "write %x to HC %d (addr 0x%x)\n",
 	   val, port, addr);
-	/* flush all outstanding writes */
-	mmiowb();
-
 	REG_WR(bp, addr, val);
 	if (REG_RD(bp, addr) != val)
 		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -887,9 +884,6 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
 	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
-	/* flush all outstanding writes */
-	mmiowb();
-
 	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
 		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
@@ -1595,7 +1589,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
 	/*
 	 * Ensure that HC_CONFIG is written before leading/trailing edge config
 	 */
-	mmiowb();
 	barrier();
 	if (!CHIP_IS_E1(bp)) {
@@ -1611,9 +1604,6 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 	}
-
-	/* Make sure that interrupts are indeed enabled from here on */
-	mmiowb();
 }
 static void bnx2x_igu_int_enable(struct bnx2x *bp)
@@ -1674,9 +1664,6 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
 	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
 	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
-
-	/* Make sure that interrupts are indeed enabled from here on */
-	mmiowb();
 }
 void bnx2x_int_enable(struct bnx2x *bp)
@@ -3833,7 +3820,6 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp)
 	REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
 			 bp->spq_prod_idx);
-	mmiowb();
 }
 /**
@@ -5244,7 +5230,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 {
 	/* No memory barriers */
 	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
-	mmiowb(); /* keep prod updates ordered */
 }
 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
@@ -6513,7 +6498,6 @@ void bnx2x_nic_init_cnic(struct bnx2x *bp)
 	/* flush all */
 	mb();
-	mmiowb();
 }
 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
@@ -6553,7 +6537,6 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
 	/* flush all before enabling interrupts */
 	mb();
-	mmiowb();
 	bnx2x_int_enable(bp);
@@ -7775,12 +7758,10 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 	   data, igu_addr_data);
 	REG_WR(bp, igu_addr_data, data);
-	mmiowb();
 	barrier();
 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 	   ctl, igu_addr_ctl);
 	REG_WR(bp, igu_addr_ctl, ctl);
-	mmiowb();
 	barrier();
 	/* wait for clean up to finish */
@@ -9550,7 +9531,6 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
 	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
 	   close ? "closing" : "opening");
-	mmiowb();
 }
 #define SHARED_MF_CLP_MAGIC	0x80000000 /* `magic' bit */
@@ -9674,7 +9654,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp)
 	if (!CHIP_IS_E1(bp)) {
 		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
 		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
-		mmiowb();
 	}
 }
@@ -9774,16 +9753,13 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
 	       reset_mask1 & (~not_reset_mask1));
 	barrier();
-	mmiowb();
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 	       reset_mask2 & (~stay_reset2));
 	barrier();
-	mmiowb();
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
-	mmiowb();
 }
 /**
@@ -9867,9 +9843,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
 	REG_WR(bp, MISC_REG_UNPREPARED, 0);
 	barrier();
-	/* Make sure all is written to the chip before the reset */
-	mmiowb();
-
 	/* Wait for 1ms to empty GLUE and PCI-E core queues,
 	 * PSWHST, GRC and PSWRD Tetris buffer.
 	 */
@@ -14828,7 +14801,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 		if (rc)
 			break;
-		mmiowb();
 		barrier();
 		/* Start accepting on iSCSI L2 ring */
@@ -14863,7 +14835,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 		if (!bnx2x_wait_sp_comp(bp, sp_bits))
 			BNX2X_ERR("rx_mode completion timed out!\n");
-		mmiowb();
 		barrier();
 		/* Unset iSCSI L2 MAC */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7b22a6d8514c..80d250a6d048 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5039,7 +5039,6 @@ static inline int bnx2x_q_init(struct bnx2x *bp,
 	/* As no ramrod is sent, complete the command immediately */
 	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
-	mmiowb();
 	smp_mb();
 	return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c97b642e6537..0edbb0a76847 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -100,13 +100,11 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 	   cmd_data.sb_id_and_flags, igu_addr_data);
 	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
-	mmiowb();
 	barrier();
 	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 	   ctl, igu_addr_ctl);
 	REG_WR(bp, igu_addr_ctl, ctl);
-	mmiowb();
 	barrier();
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 10ff37d6dc78..0752b7fa4d9c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -172,8 +172,6 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
 	/* Trigger the PF FW */
 	writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
-	mmiowb();
-
 	/* Wait for PF to complete */
 	while ((tout >= 0) && (!*done)) {
 		msleep(interval);
@@ -1179,7 +1177,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
 	/* ack the FW */
 	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
-	mmiowb();
 	/* copy the response header including status-done field,
 	 * must be last dmae, must be after FW is acked
@@ -2174,7 +2171,6 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		 */
 		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
 		/* Firmware ack should be written before unlocking channel */
-		mmiowb();
 		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
 	}
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 52ade133b57c..2a4341708c0f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -556,8 +556,6 @@ normal_tx:
 tx_done:
-	mmiowb();
-
 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
 		if (skb->xmit_more && !tx_buf->is_push)
 			bnxt_db_write(bp, &txr->tx_db, prod);
@@ -2134,7 +2132,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 				  &dim_sample);
 		net_dim(&cpr->dim, dim_sample);
 	}
-	mmiowb();
 	return work_done;
 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 060a6f386104..2aebd4bbb67d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1073,7 +1073,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
 	struct tg3 *tp = tnapi->tp;
 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
-	mmiowb();
 	/* When doing tagged status, this work check is unnecessary.
 	 * The last_tag we write above tells the chip which piece of
@@ -6999,7 +6998,6 @@ next_pkt_nopost:
 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 				     tpr->rx_jmb_prod_idx);
 		}
-		mmiowb();
 	} else if (work_mask) {
 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
 		 * updated before the producer indices can be updated.
@@ -7210,8 +7208,6 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 				     dpr->rx_jmb_prod_idx);
-		mmiowb();
-
 		if (err)
 			tw32_f(HOSTCC_MODE, tp->coal_now);
 	}
@@ -7278,7 +7274,6 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
 				HOSTCC_MODE_ENABLE | tnapi->coal_now);
 			}
-			mmiowb();
 			break;
 		}
 	}
@@ -8159,7 +8154,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
 		/* Packets are ready, update Tx producer idx on card. */
 		tw32_tx_mbox(tnapi->prodmbox, entry);
-		mmiowb();
 	}
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 2df7440f58df..39643be8c30a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -38,9 +38,6 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct)
 	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
 	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
-	/* make sure that the reset is written before starting timer */
-	mmiowb();
-
 	/* Wait for 10ms as Octeon resets. */
 	mdelay(100);
@@ -487,9 +484,6 @@ void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
 	/* Disable Interrupts */
 	writeq(0, cn6xxx->intr_enb_reg64);
-
-	/* make sure interrupts are really disabled */
-	mmiowb();
 }
 static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
@@ -555,10 +549,6 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
 			value &= ~(1 << oq_no);
 			octeon_write_csr(oct, reg, value);
-			/* Ensure that the enable register is written.
-			 */
-			mmiowb();
-
 			spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
 		}
 	}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index ce8c3f818666..934115d18488 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1449,7 +1449,6 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
 		iq->pkt_in_done -= iq->pkts_processed;
 		iq->pkts_processed = 0;
 		/* this write needs to be flushed before we release the lock */
-		mmiowb();
 		spin_unlock_bh(&iq->lock);
 		oct = iq->oct_dev;
 	}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index a0c099f71524..017169023cca 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -513,8 +513,6 @@ int octeon_retry_droq_refill(struct octeon_droq *droq)
 		 */
 		wmb();
 		writel(desc_refilled, droq->pkts_credit_reg);
-		/* make sure mmio write completes */
-		mmiowb();
 		if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
 			reschedule = 0;
@@ -712,8 +710,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
 				 */
 				wmb();
 				writel(desc_refilled, droq->pkts_credit_reg);
-				/* make sure mmio write completes */
-				mmiowb();
 			}
 		}
 	} /* for (each packet)... */
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index c6f4cbda040f..fcf20a8f92d9 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -278,7 +278,6 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
 	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
 		writel(iq->fill_cnt, iq->doorbell_reg);
 		/* make sure doorbell write goes through */
-		mmiowb();
 		iq->fill_cnt = 0;
 		iq->last_db_time = jiffies;
 		return;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8fe9af0e2ab7..466bf1ea186d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3270,11 +3270,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		if (!skb->xmit_more ||
 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
 			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
-			/* we need this if more than one processor can write to
-			 * our tail at a time, it synchronizes IO on IA64/Altix
-			 * systems
-			 */
-			mmiowb();
 		}
 	} else {
 		dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7acc61e4f645..022c3ac0e40f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3816,7 +3816,6 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
 	if (tx_ring->next_to_use == tx_ring->count)
 		tx_ring->next_to_use = 0;
 	ew32(TDT(0), tx_ring->next_to_use);
-	mmiowb();
 	usleep_range(200, 250);
 }
@@ -5904,12 +5903,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 						     tx_ring->next_to_use);
 			else
 				writel(tx_ring->next_to_use, tx_ring->tail);
-
-			/* we need this if more than one processor can write
-			 * to our tail at a time, it synchronizes IO on
-			 *IA64/Altix systems
-			 */
-			mmiowb();
 		}
 	} else {
 		dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5d4f1761dc0c..8de77155f2e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -321,8 +321,6 @@ static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
 	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
 	err_mask |= PCI_ERR_UNC_COMP_ABORT;
 	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
-
-	mmiowb();
 }
 int fm10k_iov_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index ecef949f3baa..cbf76a96e94e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1039,11 +1039,6 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 	/* notify HW of packet */
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6c97667d20ef..ffb611bbedfa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3471,11 +3471,6 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	/* notify HW of packet */
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 9b4d7cec2e18..6bfef82e7607 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2360,11 +2360,6 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
 	/* notify HW of packet */
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c289d97f477d..1af21bbe180e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1356,11 +1356,6 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
 	/* notify HW of packet */
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3269d8e94744..1d71ec360b1c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6028,11 +6028,6 @@ static int igb_tx_map(struct igb_ring *tx_ring,
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return 0;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4eab83faec62..34cd30d7162f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2279,10 +2279,6 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
 	tx_ring->buffer_info[first].next_to_watch = tx_desc;
 	tx_ring->next_to_use = i;
 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
 }
 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87a11879bf2d..f8d692f6aa4f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -892,11 +892,6 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e100054a3765..99e23cf6a73a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8299,11 +8299,6 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 		writel(i, tx_ring->tail);
-
-		/* we need this if more than one processor can write to our tail
-		 * at a time, it synchronizes IO on IA64/Altix systems
-		 */
-		mmiowb();
 	}
 	return 0;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 8b3495ee2b6e..49486c10ef81 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1139,9 +1139,6 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 	/* Make sure write' to descriptors are complete before we tell hardware */
 	wmb();
 	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
-	/* Synchronize I/O on since next processor may write to tail */
-	mmiowb();
 }
@@ -1354,7 +1351,6 @@ stopped:
 	/* reset the Rx prefetch unit */
 	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
-	mmiowb();
 }
 /* Clean out receive buffer area, assumes receiver hardware stopped */
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c81d15bf259c..87e90b5d4d7d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -129,10 +129,6 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
 	comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
 	__raw_writel((__force u32)cpu_to_be32(comm_flags),
 		     (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
-	/* Make sure that our comm channel write doesn't
-	 * get mixed in with writes from another CPU.
-	 */
-	mmiowb();
 	end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
 	while (time_before(jiffies, end)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index a5d5d6fc1da0..c678344d22a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -281,7 +281,6 @@ static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
 	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
 	__raw_writel((__force u32) cpu_to_be32(val),
 		     &priv->mfunc.comm->slave_write);
-	mmiowb();
 	mutex_unlock(&dev->persist->device_state_mutex);
 	return 0;
 }
@@ -496,12 +495,6 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 					       (op_modifier << HCR_OPMOD_SHIFT) |
 					       op), hcr + 6);
-	/*
-	 * Make sure that our HCR writes don't get mixed in with
-	 * writes from another CPU starting a FW command.
-	 */
-	mmiowb();
-
 	cmd->toggle = cmd->toggle ^ 1;
 	ret = 0;
@@ -2206,7 +2199,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 	}
 	__raw_writel((__force u32) cpu_to_be32(reply),
 		     &priv->mfunc.comm[slave].slave_read);
-	mmiowb();
 	return;
@@ -2410,7 +2402,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 				     &priv->mfunc.comm[i].slave_write);
 			__raw_writel((__force u32) 0,
 				     &priv->mfunc.comm[i].slave_read);
-			mmiowb();
 			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
 				struct mlx4_vport_state *admin_vport;
 				struct mlx4_vport_state *oper_vport;
@@ -2576,10 +2567,6 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
 		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
 		__raw_writel((__force u32)cpu_to_be32(slave_read),
 			     &priv->mfunc.comm[slave].slave_read);
-		/* Make sure that our comm channel write doesn't
-		 * get mixed in with writes from another CPU.
-		 */
-		mmiowb();
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index be48c6440251..c087d1014b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -917,7 +917,6 @@ static void cmd_work_handler(struct work_struct *work)
 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 	wmb();
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
-	mmiowb();
 	/* if not in polling don't use ent after this point */
 	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
 		poll_timeout(ent);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index e0340f778d8f..d8b7fba96d58 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1439,7 +1439,6 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 			tx->queue_active = 0;
 			put_be32(htonl(1), tx->send_stop);
 			mb();
-			mmiowb();
 		}
 		__netif_tx_unlock(dev_queue);
 	}
@@ -2861,7 +2860,6 @@ again:
 		tx->queue_active = 1;
 		put_be32(htonl(1), tx->send_go);
 		mb();
-		mmiowb();
 	}
 	tx->pkt_start++;
 	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index feda9644289d..3b2ae1a21678 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4153,8 +4153,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	writeq(val64, &tx_fifo->List_Control);
-	mmiowb();
-
 	put_off++;
 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
 		put_off = 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index b877acec5cde..1d334f2e0a56 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1826,7 +1826,6 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
 		vxge_hw_channel_msix_unmask(
 			(struct __vxge_hw_channel *)ring->handle,
 			ring->rx_vector_no);
-		mmiowb();
 	}
 	/* We are copying and returning the local variable, in case if after
@@ -2234,8 +2233,6 @@ static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
 				    fifo->tx_vector_no);
-	mmiowb();
-
 	return IRQ_HANDLED;
 }
@@ -2272,14 +2269,12 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
 		 */
 		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
 		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
-		mmiowb();
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 						     vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
 						  msix_id);
-			mmiowb();
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 59e77e3086bb..709d20d9938f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1399,11 +1399,7 @@ static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
 		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
 		&fifo->nofl_db->control_0);
-	mmiowb();
-
 	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
-
-	mmiowb();
 }
 /**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 8848d5bed6e5..fdfedbc8e431 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -814,18 +814,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
 {
 	u16 rc = 0, index;
-	/* Make certain HW write took affect */
-	mmiowb();
-
 	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
 	if (p_sb_desc->index != index) {
 		p_sb_desc->index = index;
 		rc = QED_SB_ATT_IDX;
 	}
-	/* Make certain we got a consistent view with HW */
-	mmiowb();
-
 	return rc;
 }
@@ -1213,7 +1207,6 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
 	/* Both segments (interrupts & acks) are written to same place address;
 	 * Need to guarantee all commands will be received (in-order) by HW.
 	 */
-	mmiowb();
 	barrier();
 }
@@ -1848,9 +1841,6 @@ static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
-	/* Flush the writes to IGU */
-	mmiowb();
-
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
 }
@@ -1914,9 +1904,6 @@ static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
-	/* Flush the write to IGU */
-	mmiowb();
-
 	/* calculate where to read the status bit from */
 	sb_bit = 1 << (igu_sb_id % 32);
 	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 79b311b86f66..f5f3c03b9dd2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -341,9 +341,6 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
 		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
 	REG_WR16(p_hwfn, addr, prod);
-
-	/* keep prod updates ordered */
-	mmiowb();
 }
 int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b4c8949933f1..4555c0b161ef 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1526,14 +1526,6 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	barrier();
 	writel(txq->tx_db.raw, txq->doorbell_addr);
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the queue lock is released and another start_xmit is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
-
 	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
 		if (qede_txq_has_work(txq))
 			break;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 31b046e24565..6f7e3622c6b4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -580,14 +580,6 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
 			(u32 *)&rx_prods);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the napi lock is released and another qede_poll is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
 }
 static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b61b88cbc0c7..457444894d80 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1858,7 +1858,6 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
 		wmb();
 		writel_relaxed(qdev->small_buf_q_producer_index,
 			       &port_regs->CommonRegs.rxSmallQProducerIndex);
-		mmiowb();
 	}
 }
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 3e71b65a9546..ad7c5eb8a3b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2181,7 +2181,6 @@ static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
 static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 {
 	writel(val, addr);
-	mmiowb();
 }
 /*
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 07e1c623048e..6cae33072496 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2695,7 +2695,6 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	wmb();
 	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
-	mmiowb();
 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
 		     "tx queued, slot %d, len %d\n",
 		     tx_ring->prod_idx, skb->len);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8154b38c08f7..316b47741d3f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -728,7 +728,6 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
 	spin_lock(&priv->lock);
 	ravb_emac_interrupt_unlocked(ndev);
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return IRQ_HANDLED;
 }
@@ -848,7 +847,6 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
 		result = IRQ_HANDLED;
 	}
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -881,7 +879,6 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
 		result = IRQ_HANDLED;
 	}
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -898,7 +895,6 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
 	if (ravb_queue_interrupt(ndev, q))
 		result = IRQ_HANDLED;
-	mmiowb();
 	spin_unlock(&priv->lock);
 	return result;
 }
@@ -943,7 +939,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
 			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
-			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
 		}
 	}
@@ -959,7 +954,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 			ravb_write(ndev, mask, RIE0);
 			ravb_write(ndev, mask, TIE);
 		}
-		mmiowb();
 		spin_unlock_irqrestore(&priv->lock, flags);
 	/* Receive error message handling */
@@ -1008,7 +1002,6 @@ static void ravb_adjust_link(struct net_device *ndev)
 	if (priv->no_avb_link && phydev->link)
 		ravb_rcv_snd_enable(ndev);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	if (new_state && netif_msg_link(priv))
@@ -1601,7 +1594,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		netif_stop_subqueue(ndev, q);
 exit:
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
@@ -1673,7 +1665,6 @@ static void ravb_set_rx_mode(struct net_device *ndev)
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_modify(ndev, ECMR, ECMR_PRM,
 		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index dce2a40a31e3..9a42580693cb 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -196,7 +196,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
 		ravb_write(ndev, GIE_PTCS, GIE);
 	else
 		ravb_write(ndev, GID_PTCD, GID);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return 0;
@@ -259,7 +258,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
 		else
 			ravb_write(ndev, GID_PTMD0, GID);
 	}
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return error;
@@ -331,7 +329,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 	spin_lock_irqsave(&priv->lock, flags);
 	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e33af371b169..ed30aebdb941 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2010,7 +2010,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
 		sh_eth_rcv_snd_enable(ndev);
-	mmiowb();
 	spin_unlock_irqrestore(&mdp->lock, flags);
 	if (new_state && netif_msg_link(mdp))
diff --git a/drivers/net/ethernet/sfc/falcon/io.h b/drivers/net/ethernet/sfc/falcon/io.h
index 7085ee1d5e2b..c3577643fbda 100644
--- a/drivers/net/ethernet/sfc/falcon/io.h
+++ b/drivers/net/ethernet/sfc/falcon/io.h
@@ -108,7 +108,6 @@ static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
 	_ef4_writed(efx, value->u32[2], reg + 8);
 	_ef4_writed(efx, value->u32[3], reg + 12);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -130,7 +129,6 @@ static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
 	__raw_writel((__force u32)value->u32[0], membase + addr);
 	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 89563170af52..2774a10f44e9 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -120,7 +120,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
 	_efx_writed(efx, value->u32[2], reg + 8);
 	_efx_writed(efx, value->u32[3], reg + 12);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
@@ -142,7 +141,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
 	__raw_writel((__force u32)value->u32[0], membase + addr);
 	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
-	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index c07fd594fe71..02b3962b0e63 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -251,7 +251,6 @@ enum PMConfigBits {
  * use of mdelay() at _sc92031_reset.
  * Functions prefixed with _sc92031_ must be called with the lock held;
  * functions prefixed with sc92031_ must be called without the lock held.
- * Use mmiowb() before unlocking if the hardware was written to.
  */
 /* Locking rules for the interrupt:
@@ -361,7 +360,6 @@ static void sc92031_disable_interrupts(struct net_device *dev)
 	/* stop interrupts */
 	iowrite32(0, port_base + IntrMask);
 	_sc92031_dummy_read(port_base);
-	mmiowb();
 	/* wait for any concurrent interrupt/tasklet to finish */
 	synchronize_irq(priv->pdev->irq);
@@ -379,7 +377,6 @@ static void sc92031_enable_interrupts(struct net_device *dev)
 	wmb();
 	iowrite32(IntrBits, port_base + IntrMask);
-	mmiowb();
 }
 static void _sc92031_disable_tx_rx(struct net_device *dev)
@@ -867,7 +864,6 @@ out:
 	rmb();
 	iowrite32(intr_mask, port_base + IntrMask);
-	mmiowb();
 	spin_unlock(&priv->lock);
 }
@@ -901,7 +897,6 @@ out_none:
 	rmb();
 	iowrite32(intr_mask, port_base + IntrMask);
-	mmiowb();
 	return IRQ_NONE;
 }
@@ -978,7 +973,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
 	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
 		  port_base + TxAddr0 + entry * 4);
 	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
-	mmiowb();
 	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
 		netif_stop_queue(dev);
@@ -1024,7 +1018,6 @@ static int sc92031_open(struct net_device *dev)
 	spin_lock_bh(&priv->lock);
 	_sc92031_reset(dev);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
 	sc92031_enable_interrupts(dev);
@@ -1060,7 +1053,6 @@ static int sc92031_stop(struct net_device *dev)
 	_sc92031_disable_tx_rx(dev);
 	_sc92031_tx_clear(dev);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
@@ -1081,7 +1073,6 @@ static void sc92031_set_multicast_list(struct net_device *dev)
 	_sc92031_set_mar(dev);
 	_sc92031_set_rx_config(dev);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
 }
@@ -1098,7 +1089,6 @@ static void sc92031_tx_timeout(struct net_device *dev)
 		priv->tx_timeouts++;
 	_sc92031_reset(dev);
-	mmiowb();
 	spin_unlock(&priv->lock);
@@ -1140,7 +1130,6 @@ sc92031_ethtool_get_link_ksettings(struct net_device *dev,
 	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
 	_sc92031_mii_scan(port_base);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
@@ -1311,7 +1300,6 @@ static int sc92031_ethtool_set_wol(struct net_device *dev,
 	priv->pm_config = pm_config;
 	iowrite32(pm_config, port_base + PMConfig);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
@@ -1337,7 +1325,6 @@ static int sc92031_ethtool_nway_reset(struct net_device *dev)
 out:
 	_sc92031_mii_scan(port_base);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
@@ -1530,7 +1517,6 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
 	_sc92031_disable_tx_rx(dev);
 	_sc92031_tx_clear(dev);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
@@ -1555,7 +1541,6 @@ static int sc92031_resume(struct pci_dev *pdev)
 	spin_lock_bh(&priv->lock);
 	_sc92031_reset(dev);
-	mmiowb();
 	spin_unlock_bh(&priv->lock);
 	sc92031_enable_interrupts(dev);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 33949248c829..ab55416a10fa 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -571,7 +571,6 @@ static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 	if (rp->quirks & rqStatusWBRace)
 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
 	iowrite16(mask, ioaddr + IntrStatus);
-	mmiowb();
 }
 /*
@@ -863,7 +862,6 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 		iowrite16(enable_mask, ioaddr + IntrEnable);
-		mmiowb();
 	}
 	return work_done;
 }
@@ -1893,7 +1891,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 static void rhine_irq_disable(struct rhine_private *rp)
 {
 	iowrite16(0x0000, rp->base + IntrEnable);
-	mmiowb();
 }
 /* The interrupt handler does all of the Rx thread work and cleans up
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index d8ba512f166a..1713c2d2dccf 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -219,7 +219,6 @@ static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
 static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
 {
 	__w5100_write_direct(ndev, addr, data);
-	mmiowb();
 	return 0;
 }
@@ -236,7 +235,6 @@ static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
 {
 	__w5100_write_direct(ndev, addr, data >> 8);
 	__w5100_write_direct(ndev, addr + 1, data);
-	mmiowb();
 	return 0;
 }
@@ -260,8 +258,6 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
 	for (i = 0; i < len; i++, addr++)
 		__w5100_write_direct(ndev, addr, *buf++);
-	mmiowb();
-
 	return 0;
 }
@@ -375,7 +371,6 @@ static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
 	for (i = 0; i < len; i++)
 		*buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
-	mmiowb();
 	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 	return 0;
@@ -394,7 +389,6 @@ static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
 	for (i = 0; i < len; i++)
 		__w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
-	mmiowb();
 	spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
 	return 0;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index f9da5d6172e3..3f03eecc0479 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -141,7 +141,6 @@ static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
 	spin_lock_irqsave(&priv->reg_lock, flags);
 	w5300_write_direct(priv, W5300_IDM_AR, addr);
-	mmiowb();
 	data = w5300_read_direct(priv, W5300_IDM_DR);
 	spin_unlock_irqrestore(&priv->reg_lock, flags);
@@ -154,9 +153,7 @@ static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
 	spin_lock_irqsave(&priv->reg_lock, flags);
 	w5300_write_direct(priv, W5300_IDM_AR, addr);
-	mmiowb();
 	w5300_write_direct(priv, W5300_IDM_DR, data);
-	mmiowb();
 	spin_unlock_irqrestore(&priv->reg_lock, flags);
 }
@@ -192,7 +189,6 @@ static int w5300_command(struct w5300_priv *priv, u16 cmd)
 	unsigned long timeout = jiffies + msecs_to_jiffies(100);
 	w5300_write(priv, W5300_S0_CR, cmd);
-	mmiowb();
 	while (w5300_read(priv, W5300_S0_CR) != 0) {
 		if (time_after(jiffies, timeout))
@@ -241,18 +237,15 @@ static void w5300_write_macaddr(struct w5300_priv *priv)
 	w5300_write(priv, W5300_SHARH,
 		    ndev->dev_addr[4] << 8 |
 		    ndev->dev_addr[5]);
-	mmiowb();
 }
 static void w5300_hw_reset(struct w5300_priv *priv)
 {
 	w5300_write_direct(priv, W5300_MR, MR_RST);
-	mmiowb();
 	mdelay(5);
 	w5300_write_direct(priv, W5300_MR, priv->indirect ?
 				  MR_WDF(7) | MR_PB | MR_IND :
 				  MR_WDF(7) | MR_PB);
-	mmiowb();
 	w5300_write(priv, W5300_IMR, 0);
 	w5300_write_macaddr(priv);
@@ -264,24 +257,20 @@ static void w5300_hw_reset(struct w5300_priv *priv)
 	w5300_write32(priv, W5300_TMSRL, 64 << 24);
 	w5300_write32(priv, W5300_TMSRH, 0);
 	w5300_write(priv, W5300_MTYPE, 0x00ff);
-	mmiowb();
 }
 static void w5300_hw_start(struct w5300_priv *priv)
 {
 	w5300_write(priv, W5300_S0_MR, priv->promisc ?
 			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
-	mmiowb();
 	w5300_command(priv, S0_CR_OPEN);
 	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
 	w5300_write(priv, W5300_IMR, IR_S0);
-	mmiowb();
 }
 static void w5300_hw_close(struct w5300_priv *priv)
 {
 	w5300_write(priv, W5300_IMR, 0);
-	mmiowb();
 	w5300_command(priv, S0_CR_CLOSE);
 }
@@ -372,7 +361,6 @@ static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
 	netif_stop_queue(ndev);
 	w5300_write_frame(priv, skb->data, skb->len);
-	mmiowb();
 	ndev->stats.tx_packets++;
 	ndev->stats.tx_bytes += skb->len;
 	dev_kfree_skb(skb);
@@ -419,7 +407,6 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
 	if (rx_count < budget) {
 		napi_complete_done(napi, rx_count);
 		w5300_write(priv, W5300_IMR, IR_S0);
-		mmiowb();
 	}
 	return rx_count;
@@ -434,7 +421,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
 	if (!ir)
 		return IRQ_NONE;
 	w5300_write(priv, W5300_S0_IR, ir);
-	mmiowb();
 	if (ir & S0_IR_SENDOK) {
 		netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -444,7 +430,6 @@ static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
 	if (ir & S0_IR_RECV) {
 		if (napi_schedule_prep(&priv->napi)) {
 			w5300_write(priv, W5300_IMR, 0);
-			mmiowb();
 			__napi_schedule(&priv->napi);
 		}
 	}
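Note that the hunks above leave every mb()/wmb() in place (ena_com.c, octeon_droq.c, qlge_main.c, and others): mmiowb() only ordered MMIO writes against a later spin_unlock() as observed from another CPU; it never ordered DMA-visible memory against MMIO. A rough sketch of the barrier that must stay — the ring/tail_reg names are illustrative, not from any driver in this diff:

	/* Publish the descriptor in DMA-coherent memory before the doorbell
	 * MMIO write that tells the device to look at it.
	 */
	ring->desc[i].addr = cpu_to_le64(mapping);
	ring->desc[i].len = cpu_to_le16(len);
	wmb();				/* order coherent-memory writes vs. MMIO */
	writel(i, ring->tail_reg);	/* doorbell: the wmb() above is still required */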