Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/Kconfig        |    1
-rw-r--r--  drivers/net/sfc/bitfield.h     |    4
-rw-r--r--  drivers/net/sfc/boards.c       |   74
-rw-r--r--  drivers/net/sfc/boards.h       |    1
-rw-r--r--  drivers/net/sfc/efx.c          |  131
-rw-r--r--  drivers/net/sfc/efx.h          |   13
-rw-r--r--  drivers/net/sfc/ethtool.c      |   31
-rw-r--r--  drivers/net/sfc/falcon.c       |  128
-rw-r--r--  drivers/net/sfc/falcon.h       |    2
-rw-r--r--  drivers/net/sfc/falcon_io.h    |   13
-rw-r--r--  drivers/net/sfc/mdio_10g.c     |  219
-rw-r--r--  drivers/net/sfc/mdio_10g.h     |   11
-rw-r--r--  drivers/net/sfc/mtd.c          |    1
-rw-r--r--  drivers/net/sfc/net_driver.h   |   38
-rw-r--r--  drivers/net/sfc/phy.h          |    9
-rw-r--r--  drivers/net/sfc/rx.c           |  209
-rw-r--r--  drivers/net/sfc/rx.h           |    3
-rw-r--r--  drivers/net/sfc/selftest.c     |    7
-rw-r--r--  drivers/net/sfc/sfe4001.c      |   57
-rw-r--r--  drivers/net/sfc/tenxpress.c    |  354
-rw-r--r--  drivers/net/sfc/tx.c           |   13
-rw-r--r--  drivers/net/sfc/workarounds.h  |   14
-rw-r--r--  drivers/net/sfc/xfp_phy.c      |  105
23 files changed, 750 insertions(+), 688 deletions(-)
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index c535408ad6be..12a82966b577 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -2,7 +2,6 @@ config SFC
tristate "Solarflare Solarstorm SFC4000 support"
depends on PCI && INET
select MII
- select INET_LRO
select CRC32
select I2C
select I2C_ALGOBIT
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d95c21828014..d54d84c267b9 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -543,7 +543,7 @@ typedef union efx_oword {
/* Static initialiser */
#define EFX_OWORD32(a, b, c, d) \
- { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \
- __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } }
+ { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
+ cpu_to_le32(c), cpu_to_le32(d) } }
#endif /* EFX_BITFIELD_H */
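
For context on the bitfield.h hunk: the kernel's cpu_to_le32() selects a constant-folding implementation when its argument is a compile-time constant, so the __constant_ variant is unnecessary even inside a static initialiser such as EFX_OWORD32(). A minimal user-space stand-in for the same idea, with a hand-rolled swap macro in place of the kernel's (nothing below is driver code):

#include <stdint.h>
#include <stdio.h>

/* Constant-expression byte swap, usable in static initialisers */
#define SWAB32(x) ((((uint32_t)(x) & 0x000000ffu) << 24) | \
		   (((uint32_t)(x) & 0x0000ff00u) << 8)  | \
		   (((uint32_t)(x) & 0x00ff0000u) >> 8)  | \
		   (((uint32_t)(x) & 0xff000000u) >> 24))

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define my_cpu_to_le32(x) ((uint32_t)(x))	/* no-op on little-endian */
#else
#define my_cpu_to_le32(x) SWAB32(x)
#endif

/* Rough analogue of an EFX_OWORD32(a, b, c, d) static initialiser */
struct oword { uint32_t u32[4]; };

static const struct oword example = {
	.u32 = { my_cpu_to_le32(0x11111111), my_cpu_to_le32(0x22222222),
		 my_cpu_to_le32(0x33333333), my_cpu_to_le32(0x44444444) }
};

int main(void)
{
	printf("%08x %08x %08x %08x\n",
	       (unsigned)example.u32[0], (unsigned)example.u32[1],
	       (unsigned)example.u32[2], (unsigned)example.u32[3]);
	return 0;
}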
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index 64903496aa9a..5182ac5a1034 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -26,7 +26,7 @@ static void blink_led_timer(unsigned long context)
{
struct efx_nic *efx = (struct efx_nic *)context;
struct efx_blinker *bl = &efx->board_info.blinker;
- efx->board_info.set_fault_led(efx, bl->state);
+ efx->board_info.set_id_led(efx, bl->state);
bl->state = !bl->state;
if (bl->resubmit)
mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
@@ -48,7 +48,7 @@ static void board_blink(struct efx_nic *efx, bool blink)
blinker->resubmit = false;
if (blinker->timer.function)
del_timer_sync(&blinker->timer);
- efx->board_info.set_fault_led(efx, false);
+ efx->board_info.init_leds(efx);
}
}
@@ -185,7 +185,7 @@ static struct i2c_board_info sfe4002_hwmon_info = {
#define SFE4002_RX_LED (0) /* Green */
#define SFE4002_TX_LED (1) /* Amber */
-static int sfe4002_init_leds(struct efx_nic *efx)
+static void sfe4002_init_leds(struct efx_nic *efx)
{
/* Set the TX and RX LEDs to reflect status and activity, and the
* fault LED off */
@@ -194,11 +194,9 @@ static int sfe4002_init_leds(struct efx_nic *efx)
xfp_set_led(efx, SFE4002_RX_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
- efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
- return 0;
}
-static void sfe4002_fault_led(struct efx_nic *efx, bool state)
+static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
{
xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
QUAKE_LED_OFF);
@@ -222,7 +220,67 @@ static int sfe4002_init(struct efx_nic *efx)
return rc;
efx->board_info.monitor = sfe4002_check_hw;
efx->board_info.init_leds = sfe4002_init_leds;
- efx->board_info.set_fault_led = sfe4002_fault_led;
+ efx->board_info.set_id_led = sfe4002_set_id_led;
+ efx->board_info.blink = board_blink;
+ efx->board_info.fini = efx_fini_lm87;
+ return 0;
+}
+
+/*****************************************************************************
+ * Support for the SFN4112F
+ *
+ */
+static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfn4112f_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
+ LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
+ LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
+ LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
+ LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
+ LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
+ LM87_TEMP_INT_LIMITS(10, 60), /* board */
+ LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ 0
+};
+
+static struct i2c_board_info sfn4112f_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfn4112f_lm87_channel,
+ .irq = -1,
+};
+
+#define SFN4112F_ACT_LED 0
+#define SFN4112F_LINK_LED 1
+
+static void sfn4112f_init_leds(struct efx_nic *efx)
+{
+ xfp_set_led(efx, SFN4112F_ACT_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
+ xfp_set_led(efx, SFN4112F_LINK_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
+}
+
+static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
+{
+ xfp_set_led(efx, SFN4112F_LINK_LED,
+ state ? QUAKE_LED_ON : QUAKE_LED_OFF);
+}
+
+static int sfn4112f_check_hw(struct efx_nic *efx)
+{
+ /* Mask out unused sensors */
+ return efx_check_lm87(efx, ~0x48);
+}
+
+static int sfn4112f_init(struct efx_nic *efx)
+{
+ int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
+ if (rc)
+ return rc;
+ efx->board_info.monitor = sfn4112f_check_hw;
+ efx->board_info.init_leds = sfn4112f_init_leds;
+ efx->board_info.set_id_led = sfn4112f_set_id_led;
efx->board_info.blink = board_blink;
efx->board_info.fini = efx_fini_lm87;
return 0;
@@ -243,6 +301,8 @@ static struct efx_board_data board_data[] = {
{ EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
{ EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
sfn4111t_init },
+ { EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
+ sfn4112f_init },
};
void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index d93c6c6a7548..44942de0e080 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -15,6 +15,7 @@ enum efx_board_type {
EFX_BOARD_SFE4001 = 1,
EFX_BOARD_SFE4002 = 2,
EFX_BOARD_SFN4111T = 0x51,
+ EFX_BOARD_SFN4112F = 0x52,
};
extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 7673fd92eaf5..6eff9ca6c6c8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -133,6 +133,16 @@ static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+static unsigned irq_adapt_low_thresh = 10000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned irq_adapt_high_thresh = 20000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
+
/**************************************************************************
*
* Utility functions and prototypes
@@ -182,7 +192,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
channel->rx_pkt = NULL;
}
- efx_flush_lro(channel);
efx_rx_strategy(channel);
efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
@@ -224,12 +233,41 @@ static int efx_poll(struct napi_struct *napi, int budget)
rx_packets = efx_process_channel(channel, budget);
if (rx_packets < budget) {
+ struct efx_nic *efx = channel->efx;
+
+ if (channel->used_flags & EFX_USED_BY_RX &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ unsigned old_irq_moderation = channel->irq_moderation;
+
+ if (unlikely(channel->irq_mod_score <
+ irq_adapt_low_thresh)) {
+ channel->irq_moderation =
+ max_t(int,
+ channel->irq_moderation -
+ FALCON_IRQ_MOD_RESOLUTION,
+ FALCON_IRQ_MOD_RESOLUTION);
+ } else if (unlikely(channel->irq_mod_score >
+ irq_adapt_high_thresh)) {
+ channel->irq_moderation =
+ min(channel->irq_moderation +
+ FALCON_IRQ_MOD_RESOLUTION,
+ efx->irq_rx_moderation);
+ }
+
+ if (channel->irq_moderation != old_irq_moderation)
+ falcon_set_int_moderation(channel);
+
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+ }
+
/* There is no race here; although napi_disable() will
- * only wait for netif_rx_complete(), this isn't a problem
+ * only wait for napi_complete(), this isn't a problem
* since efx_channel_processed() will have no effect if
* interrupts have already been disabled.
*/
- netif_rx_complete(napi);
+ napi_complete(napi);
efx_channel_processed(channel);
}
@@ -558,6 +596,8 @@ static void efx_link_status_changed(struct efx_nic *efx)
}
+static void efx_fini_port(struct efx_nic *efx);
+
/* This call reinitialises the MAC to pick up new PHY settings. The
* caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
@@ -593,8 +633,8 @@ void __efx_reconfigure_port(struct efx_nic *efx)
fail:
EFX_ERR(efx, "failed to reconfigure MAC\n");
- efx->phy_op->fini(efx);
- efx->port_initialized = false;
+ efx->port_enabled = false;
+ efx_fini_port(efx);
}
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
@@ -676,9 +716,8 @@ static int efx_init_port(struct efx_nic *efx)
rc = efx->phy_op->init(efx);
if (rc)
return rc;
- efx->phy_op->reconfigure(efx);
-
mutex_lock(&efx->mac_lock);
+ efx->phy_op->reconfigure(efx);
rc = falcon_switch_mac(efx);
mutex_unlock(&efx->mac_lock);
if (rc)
@@ -686,7 +725,7 @@ static int efx_init_port(struct efx_nic *efx)
efx->mac_op->reconfigure(efx);
efx->port_initialized = true;
- efx->stats_enabled = true;
+ efx_stats_enable(efx);
return 0;
fail:
@@ -735,6 +774,7 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
+ efx_stats_disable(efx);
efx->phy_op->fini(efx);
efx->port_initialized = false;
@@ -990,7 +1030,7 @@ static int efx_probe_nic(struct efx_nic *efx)
efx_set_channels(efx);
/* Initialise the interrupt moderation settings */
- efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
+ efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
return 0;
}
@@ -1187,7 +1227,8 @@ void efx_flush_queues(struct efx_nic *efx)
**************************************************************************/
/* Set interrupt moderation parameters */
-void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
+void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
+ bool rx_adaptive)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
@@ -1197,6 +1238,8 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
efx_for_each_tx_queue(tx_queue, efx)
tx_queue->channel->irq_moderation = tx_usecs;
+ efx->irq_rx_adaptive = rx_adaptive;
+ efx->irq_rx_moderation = rx_usecs;
efx_for_each_rx_queue(rx_queue, efx)
rx_queue->channel->irq_moderation = rx_usecs;
}
@@ -1269,18 +1312,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
static int efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- int rc;
efx_for_each_channel(channel, efx) {
channel->napi_dev = efx->net_dev;
- rc = efx_lro_init(&channel->lro_mgr, efx);
- if (rc)
- goto err;
}
return 0;
- err:
- efx_fini_napi(efx);
- return rc;
}
static void efx_fini_napi(struct efx_nic *efx)
@@ -1288,7 +1324,6 @@ static void efx_fini_napi(struct efx_nic *efx)
struct efx_channel *channel;
efx_for_each_channel(channel, efx) {
- efx_lro_fini(&channel->lro_mgr);
channel->napi_dev = NULL;
}
}
@@ -1361,6 +1396,20 @@ static int efx_net_stop(struct net_device *net_dev)
return 0;
}
+void efx_stats_disable(struct efx_nic *efx)
+{
+ spin_lock(&efx->stats_lock);
+ ++efx->stats_disable_count;
+ spin_unlock(&efx->stats_lock);
+}
+
+void efx_stats_enable(struct efx_nic *efx)
+{
+ spin_lock(&efx->stats_lock);
+ --efx->stats_disable_count;
+ spin_unlock(&efx->stats_lock);
+}
+
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
@@ -1369,12 +1418,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
struct net_device_stats *stats = &net_dev->stats;
/* Update stats if possible, but do not wait if another thread
- * is updating them (or resetting the NIC); slightly stale
- * stats are acceptable.
+ * is updating them or if MAC stats fetches are temporarily
+ * disabled; slightly stale stats are acceptable.
*/
if (!spin_trylock(&efx->stats_lock))
return stats;
- if (efx->stats_enabled) {
+ if (!efx->stats_disable_count) {
efx->mac_op->update_stats(efx);
falcon_update_nic_stats(efx);
}
@@ -1622,16 +1671,12 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* Tears down the entire software state and most of the hardware state
* before reset. */
-void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+void efx_reset_down(struct efx_nic *efx, enum reset_type method,
+ struct ethtool_cmd *ecmd)
{
EFX_ASSERT_RESET_SERIALISED(efx);
- /* The net_dev->get_stats handler is quite slow, and will fail
- * if a fetch is pending over reset. Serialise against it. */
- spin_lock(&efx->stats_lock);
- efx->stats_enabled = false;
- spin_unlock(&efx->stats_lock);
-
+ efx_stats_disable(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
mutex_lock(&efx->spi_lock);
@@ -1639,6 +1684,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
efx->phy_op->get_settings(efx, ecmd);
efx_fini_channels(efx);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ efx->phy_op->fini(efx);
}
/* This function will always ensure that the locks acquired in
@@ -1646,7 +1693,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
* that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
+int efx_reset_up(struct efx_nic *efx, enum reset_type method,
+ struct ethtool_cmd *ecmd, bool ok)
{
int rc;
@@ -1658,6 +1706,16 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
ok = false;
}
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ if (ok) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ ok = false;
+ }
+ if (!ok)
+ efx->port_initialized = false;
+ }
+
if (ok) {
efx_init_channels(efx);
@@ -1670,7 +1728,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
if (ok) {
efx_start_all(efx);
- efx->stats_enabled = true;
+ efx_stats_enable(efx);
}
return rc;
}
@@ -1702,7 +1760,7 @@ static int efx_reset(struct efx_nic *efx)
EFX_INFO(efx, "resetting (%d)\n", method);
- efx_reset_down(efx, &ecmd);
+ efx_reset_down(efx, method, &ecmd);
rc = falcon_reset_hw(efx, method);
if (rc) {
@@ -1721,10 +1779,10 @@ static int efx_reset(struct efx_nic *efx)
/* Leave device stopped if necessary */
if (method == RESET_TYPE_DISABLE) {
- efx_reset_up(efx, &ecmd, false);
+ efx_reset_up(efx, method, &ecmd, false);
rc = -EIO;
} else {
- rc = efx_reset_up(efx, &ecmd, true);
+ rc = efx_reset_up(efx, method, &ecmd, true);
}
out_disable:
@@ -1835,8 +1893,8 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
static struct efx_board efx_dummy_board_info = {
.init = efx_port_dummy_op_int,
- .init_leds = efx_port_dummy_op_int,
- .set_fault_led = efx_port_dummy_op_blink,
+ .init_leds = efx_port_dummy_op_void,
+ .set_id_led = efx_port_dummy_op_blink,
.monitor = efx_port_dummy_op_int,
.blink = efx_port_dummy_op_blink,
.fini = efx_port_dummy_op_void,
@@ -1876,6 +1934,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->rx_checksum_enabled = true;
spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
+ efx->stats_disable_count = 1;
mutex_init(&efx->mac_lock);
efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;
@@ -2097,7 +2156,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO);
if (lro)
- net_dev->features |= NETIF_F_LRO;
+ net_dev->features |= NETIF_F_GRO;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO);
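
For context on the efx_poll() hunk above: every 1000 interrupts the accumulated irq_mod_score (bumped per completed TX descriptor and by 2 per RX event in the falcon.c hunks later in this diff) is compared against the two new module-parameter thresholds, and the channel's moderation is stepped down or up by one FALCON_IRQ_MOD_RESOLUTION, bounded below by one step and above by the configured irq_rx_moderation. A stripped-down, user-space sketch of just that decision, using the default thresholds from this patch; the struct and function names are illustrative, not the driver's:

#include <stdio.h>

#define IRQ_MOD_RESOLUTION	5	/* us per step (FALCON_IRQ_MOD_RESOLUTION) */
#define IRQ_ADAPT_LOW_THRESH	10000	/* module parameter defaults */
#define IRQ_ADAPT_HIGH_THRESH	20000
#define IRQ_ADAPT_PERIOD	1000	/* decide every 1000 IRQs */

struct chan {
	unsigned int irq_count;
	unsigned int irq_mod_score;
	unsigned int irq_moderation;	/* current setting, us */
	unsigned int irq_rx_moderation;	/* configured ceiling, us */
};

/* One adaptation decision per IRQ_ADAPT_PERIOD interrupts */
static void maybe_adapt(struct chan *c)
{
	if (++c->irq_count < IRQ_ADAPT_PERIOD)
		return;

	if (c->irq_mod_score < IRQ_ADAPT_LOW_THRESH) {
		/* Little work per IRQ: lower moderation for latency,
		 * but never below one timer step */
		c->irq_moderation =
			(c->irq_moderation > 2 * IRQ_MOD_RESOLUTION) ?
			c->irq_moderation - IRQ_MOD_RESOLUTION :
			IRQ_MOD_RESOLUTION;
	} else if (c->irq_mod_score > IRQ_ADAPT_HIGH_THRESH) {
		/* Lots of work per IRQ: raise moderation, capped at the
		 * user-configured ceiling */
		c->irq_moderation += IRQ_MOD_RESOLUTION;
		if (c->irq_moderation > c->irq_rx_moderation)
			c->irq_moderation = c->irq_rx_moderation;
	}
	c->irq_count = 0;
	c->irq_mod_score = 0;
}

int main(void)
{
	struct chan c = { 0, 0, 30, 60 };
	unsigned int i;

	for (i = 0; i < 3 * IRQ_ADAPT_PERIOD; i++) {
		c.irq_mod_score += 25;	/* ~25 score units per interrupt */
		maybe_adapt(&c);
	}
	printf("moderation after busy period: %u us\n", c.irq_moderation);
	return 0;
}

With a sustained score of about 25,000 per window, the sketch steps 30 -> 35 -> 40 -> 45 us over the three simulated windows, mirroring how a busy channel drifts toward the configured ceiling.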
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 0dd7a532c78a..da157aa74b83 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,20 +36,23 @@ extern void efx_process_channel_now(struct efx_channel *channel);
extern void efx_flush_queues(struct efx_nic *efx);
/* Ports */
+extern void efx_stats_disable(struct efx_nic *efx);
+extern void efx_stats_enable(struct efx_nic *efx);
extern void efx_reconfigure_port(struct efx_nic *efx);
extern void __efx_reconfigure_port(struct efx_nic *efx);
/* Reset handling */
-extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
-extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
- bool ok);
+extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
+ struct ethtool_cmd *ecmd);
+extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
+ struct ethtool_cmd *ecmd, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
extern void efx_suspend(struct efx_nic *efx);
extern void efx_resume(struct efx_nic *efx);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
- int rx_usecs);
+ int rx_usecs, bool rx_adaptive);
extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
extern void efx_hex_dump(const u8 *, unsigned int, const char *);
@@ -77,7 +80,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
channel->channel, raw_smp_processor_id());
channel->work_pending = true;
- netif_rx_schedule(&channel->napi_str);
+ napi_schedule(&channel->napi_str);
}
#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 53d259e90187..64309f4e8b19 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg)
- return -EINVAL;
-
/* Falcon GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
@@ -532,7 +529,14 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- return mii_nway_restart(&efx->mii);
+ if (efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) {
+ mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
+ MDIO_MMDREG_CTRL1,
+ __ffs(BMCR_ANRESTART), true);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
}
static u32 efx_ethtool_get_link(struct net_device *net_dev)
@@ -600,7 +604,6 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
- struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
memset(coalesce, 0, sizeof(*coalesce));
@@ -618,14 +621,8 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
}
}
- /* Find lowest IRQ moderation across all used RX queues */
- coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
- efx_for_each_rx_queue(rx_queue, efx) {
- channel = rx_queue->channel;
- if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
- coalesce->rx_coalesce_usecs_irq =
- channel->irq_moderation;
- }
+ coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
+ coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
return 0;
}
@@ -639,10 +636,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- unsigned tx_usecs, rx_usecs;
+ unsigned tx_usecs, rx_usecs, adaptive;
- if (coalesce->use_adaptive_rx_coalesce ||
- coalesce->use_adaptive_tx_coalesce)
+ if (coalesce->use_adaptive_tx_coalesce)
return -EOPNOTSUPP;
if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
@@ -653,6 +649,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
rx_usecs = coalesce->rx_coalesce_usecs_irq;
tx_usecs = coalesce->tx_coalesce_usecs_irq;
+ adaptive = coalesce->use_adaptive_rx_coalesce;
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_tx_queue(tx_queue, efx) {
@@ -664,7 +661,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
}
}
- efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
+ efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
/* Reset channel to pick up new moderation value. Note that
* this may change the value of the irq_moderation field
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 5b9f2d9cc4ed..23a1b148d5b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -39,11 +39,16 @@
* @next_buffer_table: First available buffer table id
* @pci_dev2: The secondary PCI device if present
* @i2c_data: Operations and state for I2C bit-bashing algorithm
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
*/
struct falcon_nic_data {
unsigned next_buffer_table;
struct pci_dev *pci_dev2;
struct i2c_algo_bit_data i2c_data;
+
+ unsigned int_error_count;
+ unsigned long int_error_expire;
};
/**************************************************************************
@@ -119,8 +124,12 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
#define FALCON_EVQ_SIZE 4096
#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
-/* Max number of internal errors. After this resets will not be performed */
-#define FALCON_MAX_INT_ERRORS 4
+/* If FALCON_MAX_INT_ERRORS internal errors occur within
+ * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define FALCON_INT_ERROR_EXPIRE 3600
+#define FALCON_MAX_INT_ERRORS 5
/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
*/
@@ -146,13 +155,6 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
-/* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
-#define PCI_EXP_DEVCAP_PWR_VAL_LBN 18
-#define PCI_EXP_DEVCAP_PWR_SCL_LBN 26
-#define PCI_EXP_DEVCTL_PAYLOAD_LBN 5
-#define PCI_EXP_LNKSTA_LNK_WID 0x3f0
-#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
-
#define FALCON_IS_DUAL_FUNC(efx) \
(falcon_rev(efx) < FALCON_REV_B0)
@@ -727,6 +729,9 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
+ channel->irq_mod_score +=
+ (tx_ev_desc_ptr - tx_queue->read_count) &
+ efx->type->txd_ring_mask;
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
@@ -824,10 +829,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
-
- if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
- efx->phy_type == PHY_TYPE_SFX7101))
- tenxpress_crc_err(efx);
}
/* Handle receive events that are not in-order. */
@@ -900,6 +901,8 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
discard = true;
}
+ channel->irq_mod_score += 2;
+
/* Handle received packet */
efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
checksummed, discard);
@@ -1077,14 +1080,15 @@ void falcon_set_int_moderation(struct efx_channel *channel)
* program is based at 0. So actual interrupt moderation
* achieved is ((x + 1) * res).
*/
- unsigned int res = 5;
- channel->irq_moderation -= (channel->irq_moderation % res);
- if (channel->irq_moderation < res)
- channel->irq_moderation = res;
+ channel->irq_moderation -= (channel->irq_moderation %
+ FALCON_IRQ_MOD_RESOLUTION);
+ if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
+ channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
EFX_POPULATE_DWORD_2(timer_cmd,
TIMER_MODE, TIMER_MODE_INT_HLDOFF,
TIMER_VAL,
- (channel->irq_moderation / res) - 1);
+ channel->irq_moderation /
+ FALCON_IRQ_MOD_RESOLUTION - 1);
} else {
EFX_POPULATE_DWORD_2(timer_cmd,
TIMER_MODE, TIMER_MODE_DIS,
@@ -1191,31 +1195,29 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
struct efx_channel *channel = &efx->channel[0];
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- unsigned int read_ptr, i;
+ unsigned int read_ptr = channel->eventq_read_ptr;
+ unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;
- read_ptr = channel->eventq_read_ptr;
- for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+ do {
efx_qword_t *event = falcon_event(channel, read_ptr);
int ev_code, ev_sub_code, ev_queue;
bool ev_failed;
+
if (!falcon_event_present(event))
break;
ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
- if (ev_code != DRIVER_EV_DECODE)
- continue;
-
ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
- switch (ev_sub_code) {
- case TX_DESCQ_FLS_DONE_EV_DECODE:
+ if (ev_code == DRIVER_EV_DECODE &&
+ ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
ev_queue = EFX_QWORD_FIELD(*event,
DRIVER_EV_TX_DESCQ_ID);
if (ev_queue < EFX_TX_QUEUE_COUNT) {
tx_queue = efx->tx_queue + ev_queue;
tx_queue->flushed = true;
}
- break;
- case RX_DESCQ_FLS_DONE_EV_DECODE:
+ } else if (ev_code == DRIVER_EV_DECODE &&
+ ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
ev_queue = EFX_QWORD_FIELD(*event,
DRIVER_EV_RX_DESCQ_ID);
ev_failed = EFX_QWORD_FIELD(*event,
@@ -1229,11 +1231,10 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
else
rx_queue->flushed = true;
}
- break;
}
read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
- }
+ } while (read_ptr != end_ptr);
}
/* Handle tx and rx flushes at the same time, since they run in
@@ -1381,7 +1382,6 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
- static int n_int_errors;
falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
@@ -1408,7 +1408,14 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
pci_clear_master(nic_data->pci_dev2);
falcon_disable_interrupts(efx);
- if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
+ /* Count errors and reset or disable the NIC accordingly */
+ if (nic_data->int_error_count == 0 ||
+ time_after(jiffies, nic_data->int_error_expire)) {
+ nic_data->int_error_count = 0;
+ nic_data->int_error_expire =
+ jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
@@ -1427,6 +1434,7 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
@@ -1441,23 +1449,24 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
if (unlikely(syserr))
return falcon_fatal_interrupt(efx);
- if (queues == 0)
- return IRQ_NONE;
-
- efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-
/* Schedule processing of any interrupting queues */
- channel = &efx->channel[0];
- while (queues) {
- if (queues & 0x01)
+ efx_for_each_channel(channel, efx) {
+ if ((queues & 1) ||
+ falcon_event_present(
+ falcon_event(channel, channel->eventq_read_ptr))) {
efx_schedule_channel(channel);
- channel++;
+ result = IRQ_HANDLED;
+ }
queues >>= 1;
}
- return IRQ_HANDLED;
+ if (result == IRQ_HANDLED) {
+ efx->last_irq_cpu = raw_smp_processor_id();
+ EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ }
+
+ return result;
}
@@ -1887,7 +1896,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
/* MAC stats will fail whilst the TX fifo is draining. Serialise
* the drain sequence with the statistics fetch */
- spin_lock(&efx->stats_lock);
+ efx_stats_disable(efx);
falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
@@ -1917,7 +1926,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
udelay(10);
}
- spin_unlock(&efx->stats_lock);
+ efx_stats_enable(efx);
/* If we've reset the EM block and the link is up, then
* we'll have to kick the XAUI link so the PHY can recover */
@@ -2253,6 +2262,7 @@ static int falcon_probe_phy(struct efx_nic *efx)
efx->phy_op = &falcon_sft9001_phy_ops;
break;
case PHY_TYPE_QT2022C2:
+ case PHY_TYPE_QT2025C:
efx->phy_op = &falcon_xfp_phy_ops;
break;
default:
@@ -2277,6 +2287,10 @@ int falcon_switch_mac(struct efx_nic *efx)
struct efx_mac_operations *old_mac_op = efx->mac_op;
efx_oword_t nic_stat;
unsigned strap_val;
+ int rc = 0;
+
+ /* Don't try to fetch MAC stats while we're switching MACs */
+ efx_stats_disable(efx);
/* Internal loopbacks override the phy speed setting */
if (efx->loopback_mode == LOOPBACK_GMAC) {
@@ -2287,16 +2301,12 @@ int falcon_switch_mac(struct efx_nic *efx)
efx->link_fd = true;
}
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
efx->mac_op = (EFX_IS10G(efx) ?
&falcon_xmac_operations : &falcon_gmac_operations);
- if (old_mac_op == efx->mac_op)
- return 0;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- /* Not all macs support a mac-level link state */
- efx->mac_up = true;
+ /* Always push the NIC_STAT_REG setting even if the mac hasn't
+ * changed, because this function is run post online reset */
falcon_read(efx, &nic_stat, NIC_STAT_REG);
strap_val = EFX_IS10G(efx) ? 5 : 3;
if (falcon_rev(efx) >= FALCON_REV_B0) {
@@ -2309,9 +2319,17 @@ int falcon_switch_mac(struct efx_nic *efx)
BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
}
+ if (old_mac_op == efx->mac_op)
+ goto out;
EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
- return falcon_reset_macs(efx);
+ /* Not all macs support a mac-level link state */
+ efx->mac_up = true;
+
+ rc = falcon_reset_macs(efx);
+out:
+ efx_stats_enable(efx);
+ return rc;
}
/* This call is responsible for hooking in the MAC and PHY operations */
@@ -3109,8 +3127,10 @@ void falcon_remove_nic(struct efx_nic *efx)
struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
+ /* Remove I2C adapter and clear it in preparation for a retry */
rc = i2c_del_adapter(&efx->i2c_adap);
BUG_ON(rc);
+ memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));
falcon_remove_spi_devices(efx);
falcon_free_buffer(efx, &efx->irq_status);
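
For context on the falcon_fatal_interrupt() change above: the old static n_int_errors counter disabled the NIC after four internal errors over the driver's lifetime, whereas the new code keeps a per-NIC count that restarts once FALCON_INT_ERROR_EXPIRE seconds have passed, so only FALCON_MAX_INT_ERRORS errors inside one window disable the device. A small user-space sketch of the same windowed-count pattern, with a plain seconds value standing in for jiffies/time_after(); names below are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define INT_ERROR_EXPIRE 3600	/* window length, seconds */
#define MAX_INT_ERRORS   5	/* errors per window before giving up */

struct err_state {
	unsigned int count;
	unsigned long expire;	/* end of the current window */
};

/* Returns true if a reset should be scheduled, false if the device
 * should be disabled (too many errors in one window). */
static bool note_error(struct err_state *s, unsigned long now)
{
	if (s->count == 0 || now > s->expire) {
		s->count = 0;
		s->expire = now + INT_ERROR_EXPIRE;
	}
	return ++s->count < MAX_INT_ERRORS;
}

int main(void)
{
	struct err_state s = { 0, 0 };
	unsigned long t;

	/* Errors spaced more than a window apart never accumulate... */
	for (t = 0; t < 4 * INT_ERROR_EXPIRE; t += INT_ERROR_EXPIRE + 1)
		printf("t=%lu reset=%d\n", t, note_error(&s, t));

	/* ...but five errors in quick succession trip the limit. */
	for (t = 0; t < 5; t++)
		printf("burst %lu reset=%d\n", t, note_error(&s, 20000 + t));
	return 0;
}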
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 7869c3d74383..77f2e0db7ca1 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -85,6 +85,8 @@ extern void falcon_set_int_moderation(struct efx_channel *channel);
extern void falcon_disable_interrupts(struct efx_nic *efx);
extern void falcon_fini_interrupt(struct efx_nic *efx);
+#define FALCON_IRQ_MOD_RESOLUTION 5
+
/* Global Resources */
extern int falcon_probe_nic(struct efx_nic *efx);
extern int falcon_probe_resources(struct efx_nic *efx);
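
For context on the new FALCON_IRQ_MOD_RESOLUTION constant: falcon_set_int_moderation() (see the falcon.c hunk above) rounds the requested moderation down to a multiple of this 5 us resolution, clamps it to at least one step, and programs value/resolution - 1 into the TIMER_VAL field. A worked example of that arithmetic as a standalone sketch (the function name is illustrative):

#include <stdio.h>

#define FALCON_IRQ_MOD_RESOLUTION 5	/* timer ticks are 5 us apart */

/* Round a requested moderation (us) the way falcon_set_int_moderation()
 * does, and return the value programmed into the TIMER_VAL field. */
static unsigned int timer_val_for(unsigned int usecs)
{
	usecs -= usecs % FALCON_IRQ_MOD_RESOLUTION;
	if (usecs < FALCON_IRQ_MOD_RESOLUTION)
		usecs = FALCON_IRQ_MOD_RESOLUTION;
	return usecs / FALCON_IRQ_MOD_RESOLUTION - 1;
}

int main(void)
{
	/* A request for 23 us is programmed as 20 us -> TIMER_VAL 3;
	 * anything under 5 us is raised to 5 us -> TIMER_VAL 0. */
	printf("23 us -> TIMER_VAL %u\n", timer_val_for(23));
	printf(" 4 us -> TIMER_VAL %u\n", timer_val_for(4));
	return 0;
}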
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index c16da3149fa9..8883092dae97 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -238,18 +238,21 @@ static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
/* Write dword to Falcon page-mapped register with an extra lock.
*
* As for falcon_writel_page(), but for a register that suffers from
- * SFC bug 3181. Take out a lock so the BIU collector cannot be
- * confused. */
+ * SFC bug 3181. If writing to page 0, take out a lock so the BIU
+ * collector cannot be confused.
+ */
static inline void falcon_writel_page_locked(struct efx_nic *efx,
efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
- unsigned long flags;
+ unsigned long flags = 0;
- spin_lock_irqsave(&efx->biu_lock, flags);
+ if (page == 0)
+ spin_lock_irqsave(&efx->biu_lock, flags);
falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
+ if (page == 0)
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
}
#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index f6a16428113d..9f5ec3eb3418 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,6 +15,22 @@
#include "net_driver.h"
#include "mdio_10g.h"
#include "boards.h"
+#include "workarounds.h"
+
+unsigned mdio_id_oui(u32 id)
+{
+ unsigned oui = 0;
+ int i;
+
+ /* The bits of the OUI are designated a..x, with a=0 and b variable.
+ * In the id register c is the MSB but the OUI is conventionally
+ * written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */
+ for (i = 0; i < 22; ++i)
+ if (id & (1 << (i + 10)))
+ oui |= 1 << (i ^ 7);
+
+ return oui;
+}
int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
int spins, int spintime)
@@ -124,24 +140,25 @@ int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
int mdio_clause45_check_mmds(struct efx_nic *efx,
unsigned int mmd_mask, unsigned int fatal_mask)
{
+ int mmd = 0, probe_mmd, devs0, devs1;
u32 devices;
- int mmd = 0, probe_mmd;
/* Historically we have probed the PHYXS to find out what devices are
* present,but that doesn't work so well if the PHYXS isn't expected
* to exist, if so just find the first item in the list supplied. */
probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
__ffs(mmd_mask);
- devices = (mdio_clause45_read(efx, efx->mii.phy_id,
- probe_mmd, MDIO_MMDREG_DEVS0) |
- mdio_clause45_read(efx, efx->mii.phy_id,
- probe_mmd, MDIO_MMDREG_DEVS1) << 16);
/* Check all the expected MMDs are present */
- if (devices < 0) {
+ devs0 = mdio_clause45_read(efx, efx->mii.phy_id,
+ probe_mmd, MDIO_MMDREG_DEVS0);
+ devs1 = mdio_clause45_read(efx, efx->mii.phy_id,
+ probe_mmd, MDIO_MMDREG_DEVS1);
+ if (devs0 < 0 || devs1 < 0) {
EFX_ERR(efx, "failed to read devices present\n");
return -EIO;
}
+ devices = devs0 | (devs1 << 16);
if ((devices & mmd_mask) != mmd_mask) {
EFX_ERR(efx, "required MMDs not present: got %x, "
"wanted %x\n", devices, mmd_mask);
@@ -179,17 +196,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return false;
else if (efx_phy_mode_disabled(efx->phy_mode))
return false;
- else if (efx->loopback_mode == LOOPBACK_PHYXS) {
+ else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
MDIO_MMDREG_DEVS_PCS |
MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
- if (!mmd_mask) {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
- MDIO_PHYXS_STATUS2);
- return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
- }
- } else if (efx->loopback_mode == LOOPBACK_PCS)
+ else if (efx->loopback_mode == LOOPBACK_PCS)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
@@ -197,6 +209,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
+ if (!mmd_mask) {
+ /* Use presence of XGMII faults in lieu of link state */
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_STATUS2);
+ return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
+ }
+
while (mmd_mask) {
if (mmd_mask & 1) {
/* Double reads because link state is latched, and a
@@ -263,7 +282,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
}
}
-static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
+static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr)
{
int phy_id = efx->mii.phy_id;
u32 result = 0;
@@ -278,9 +297,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
result |= ADVERTISED_100baseT_Half;
if (reg & ADVERTISE_100FULL)
result |= ADVERTISED_100baseT_Full;
- if (reg & LPA_RESV)
- result |= xnp;
-
return result;
}
@@ -310,7 +326,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx,
*/
void mdio_clause45_get_settings_ext(struct efx_nic *efx,
struct ethtool_cmd *ecmd,
- u32 xnp, u32 xnp_lpa)
+ u32 npage_adv, u32 npage_lpa)
{
int phy_id = efx->mii.phy_id;
int reg;
@@ -361,8 +377,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |=
ADVERTISED_Autoneg |
- mdio_clause45_get_an(efx,
- MDIO_AN_ADVERTISE, xnp);
+ mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) |
+ npage_adv;
} else
ecmd->autoneg = AUTONEG_DISABLE;
} else
@@ -371,27 +387,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
if (ecmd->autoneg) {
/* If AN is complete, report best common mode,
* otherwise report best advertised mode. */
- u32 common = ecmd->advertising;
+ u32 modes = 0;
if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_STAT1) &
- (1 << MDIO_AN_STATUS_AN_DONE_LBN)) {
- common &= mdio_clause45_get_an(efx, MDIO_AN_LPA,
- xnp_lpa);
- }
- if (common & ADVERTISED_10000baseT_Full) {
+ (1 << MDIO_AN_STATUS_AN_DONE_LBN))
+ modes = (ecmd->advertising &
+ (mdio_clause45_get_an(efx, MDIO_AN_LPA) |
+ npage_lpa));
+ if (modes == 0)
+ modes = ecmd->advertising;
+
+ if (modes & ADVERTISED_10000baseT_Full) {
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
- } else if (common & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+ } else if (modes & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
ecmd->speed = SPEED_1000;
- ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full);
- } else if (common & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full);
+ } else if (modes & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
ecmd->speed = SPEED_100;
- ecmd->duplex = !!(common & ADVERTISED_100baseT_Full);
+ ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
} else {
ecmd->speed = SPEED_10;
- ecmd->duplex = !!(common & ADVERTISED_10baseT_Full);
+ ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
}
} else {
/* Report forced settings */
@@ -415,7 +434,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
int phy_id = efx->mii.phy_id;
struct ethtool_cmd prev;
u32 required;
- int ctrl1_bits, reg;
+ int reg;
efx->phy_op->get_settings(efx, &prev);
@@ -430,99 +449,83 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
if (prev.port != PORT_TP || ecmd->port != PORT_TP)
return -EINVAL;
- /* Check that PHY supports these settings and work out the
- * basic control bits */
- if (ecmd->duplex) {
+ /* Check that PHY supports these settings */
+ if (ecmd->autoneg) {
+ required = SUPPORTED_Autoneg;
+ } else if (ecmd->duplex) {
switch (ecmd->speed) {
- case SPEED_10:
- ctrl1_bits = BMCR_FULLDPLX;
- required = SUPPORTED_10baseT_Full;
- break;
- case SPEED_100:
- ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX;
- required = SUPPORTED_100baseT_Full;
- break;
- case SPEED_1000:
- ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX;
- required = SUPPORTED_1000baseT_Full;
- break;
- case SPEED_10000:
- ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 |
- BMCR_FULLDPLX);
- required = SUPPORTED_10000baseT_Full;
- break;
- default:
- return -EINVAL;
+ case SPEED_10: required = SUPPORTED_10baseT_Full; break;
+ case SPEED_100: required = SUPPORTED_100baseT_Full; break;
+ default: return -EINVAL;
}
} else {
switch (ecmd->speed) {
- case SPEED_10:
- ctrl1_bits = 0;
- required = SUPPORTED_10baseT_Half;
- break;
- case SPEED_100:
- ctrl1_bits = BMCR_SPEED100;
- required = SUPPORTED_100baseT_Half;
- break;
- case SPEED_1000:
- ctrl1_bits = BMCR_SPEED1000;
- required = SUPPORTED_1000baseT_Half;
- break;
- default:
- return -EINVAL;
+ case SPEED_10: required = SUPPORTED_10baseT_Half; break;
+ case SPEED_100: required = SUPPORTED_100baseT_Half; break;
+ default: return -EINVAL;
}
}
- if (ecmd->autoneg)
- required |= SUPPORTED_Autoneg;
required |= ecmd->advertising;
if (required & ~prev.supported)
return -EINVAL;
- /* Set the basic control bits */
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- MDIO_MMDREG_CTRL1);
- reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c);
- reg |= ctrl1_bits;
- mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1,
- reg);
-
- /* Set the AN registers */
- if (ecmd->autoneg != prev.autoneg ||
- ecmd->advertising != prev.advertising) {
- bool xnp = false;
-
- if (efx->phy_op->set_xnp_advertise)
- xnp = efx->phy_op->set_xnp_advertise(efx,
- ecmd->advertising);
-
- if (ecmd->autoneg) {
- reg = 0;
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
- reg |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
- reg |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
- reg |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- reg |= ADVERTISE_100FULL;
- if (xnp)
- reg |= ADVERTISE_RESV;
- mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE, reg);
- }
+ if (ecmd->autoneg) {
+ bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
+ || EFX_WORKAROUND_13204(efx));
+
+ /* Set up the base page */
+ reg = ADVERTISE_CSMA;
+ if (ecmd->advertising & ADVERTISED_10baseT_Half)
+ reg |= ADVERTISE_10HALF;
+ if (ecmd->advertising & ADVERTISED_10baseT_Full)
+ reg |= ADVERTISE_10FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Half)
+ reg |= ADVERTISE_100HALF;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ reg |= ADVERTISE_100FULL;
+ if (xnp)
+ reg |= ADVERTISE_RESV;
+ else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full))
+ reg |= ADVERTISE_NPAGE;
+ reg |= efx_fc_advertise(efx->wanted_fc);
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
+ MDIO_AN_ADVERTISE, reg);
+ /* Set up the (extended) next page if necessary */
+ if (efx->phy_op->set_npage_adv)
+ efx->phy_op->set_npage_adv(efx, ecmd->advertising);
+
+ /* Enable and restart AN */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1);
- if (ecmd->autoneg)
- reg |= BMCR_ANENABLE | BMCR_ANRESTART;
- else
- reg &= ~BMCR_ANENABLE;
+ reg |= BMCR_ANENABLE;
+ if (!(EFX_WORKAROUND_15195(efx) &&
+ LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
+ reg |= BMCR_ANRESTART;
if (xnp)
reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
else
reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1, reg);
+ } else {
+ /* Disable AN */
+ mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
+ MDIO_MMDREG_CTRL1,
+ __ffs(BMCR_ANENABLE), false);
+
+ /* Set the basic control bits */
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_CTRL1);
+ reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX |
+ 0x003c);
+ if (ecmd->speed == SPEED_100)
+ reg |= BMCR_SPEED100;
+ if (ecmd->duplex)
+ reg |= BMCR_FULLDPLX;
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_CTRL1, reg);
}
return 0;
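
For context on the new mdio_id_oui() helper: ID-register bits 10..31 carry 22 bits of the OUI, but with the bit order inside each OUI byte reversed relative to how the OUI is conventionally written; the i ^ 7 term undoes that per-byte reversal. A standalone sketch that copies the same loop and prints where each ID bit lands (purely illustrative, no real device IDs involved):

#include <stdio.h>

/* Same bit reorder as the new mdio_id_oui() in this patch */
static unsigned int id_to_oui(unsigned int id)
{
	unsigned int oui = 0;
	int i;

	for (i = 0; i < 22; ++i)
		if (id & (1u << (i + 10)))
			oui |= 1u << (i ^ 7);
	return oui;
}

int main(void)
{
	int i;

	/* Bits are reversed within each byte (i ^ 7), so ID bit 10 maps
	 * to OUI bit 7, ID bit 17 to OUI bit 0, ID bit 18 to OUI bit 15,
	 * and so on. */
	for (i = 10; i < 32; i++) {
		unsigned int oui = id_to_oui(1u << i);
		int bit;

		for (bit = 0; oui >> bit != 1; bit++)
			;
		printf("ID bit %2d -> OUI bit %2d\n", i, bit);
	}
	return 0;
}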
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 09bf801d0569..7014d2279c20 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -70,10 +70,10 @@
#define MDIO_MMDREG_STAT1_LPABLE_LBN (1)
#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1)
-/* Bits in ID reg */
-#define MDIO_ID_REV(_id32) (_id32 & 0xf)
-#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f)
-#define MDIO_ID_OUI(_id32) (_id32 >> 10)
+/* Bits in combined ID regs */
+static inline unsigned mdio_id_rev(u32 id) { return id & 0xf; }
+static inline unsigned mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
+extern unsigned mdio_id_oui(u32 id);
/* Bits in MMDREG_DEVS0/1. Someone thoughtfully layed things out
* so the 'bit present' bit number of an MMD is the number of
@@ -155,7 +155,8 @@
#define MDIO_AN_XNP 22
#define MDIO_AN_LPA_XNP 25
-#define MDIO_AN_10GBT_ADVERTISE 32
+#define MDIO_AN_10GBT_CTRL 32
+#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12
#define MDIO_AN_10GBT_STATUS (33)
#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 665cafb88d6a..820c233c3ea0 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -15,6 +15,7 @@
#define EFX_DRIVER_NAME "sfc_mtd"
#include "net_driver.h"
#include "spi.h"
+#include "efx.h"
#define EFX_SPI_VERIFY_BUF_LEN 16
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 5f255f75754e..e169e5dcd1e6 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -25,15 +25,11 @@
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
-#include <linux/inet_lro.h>
#include <linux/i2c.h>
#include "enum.h"
#include "bitfield.h"
-#define EFX_MAX_LRO_DESCRIPTORS 8
-#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
-
/**************************************************************************
*
* Build definitions
@@ -340,13 +336,12 @@ enum efx_rx_alloc_method {
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
* @eventq_magic: Event queue magic value for driver-generated test events
- * @lro_mgr: LRO state
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
* and diagnostic counters
* @rx_alloc_push_pages: RX allocation method currently in use for pushing
* descriptors
- * @rx_alloc_pop_pages: RX allocation method currently in use for popping
- * descriptors
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_frag_err: Count of RX IP fragment errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
@@ -371,10 +366,11 @@ struct efx_channel {
unsigned int last_eventq_read_ptr;
unsigned int eventq_magic;
- struct net_lro_mgr lro_mgr;
+ unsigned int irq_count;
+ unsigned int irq_mod_score;
+
int rx_alloc_level;
int rx_alloc_push_pages;
- int rx_alloc_pop_pages;
unsigned n_rx_tobe_disc;
unsigned n_rx_ip_frag_err;
@@ -394,13 +390,11 @@ struct efx_channel {
/**
* struct efx_blinker - S/W LED blinking context
- * @led_num: LED ID (board-specific meaning)
* @state: Current state - on or off
* @resubmit: Timer resubmission flag
* @timer: Control timer for blinking
*/
struct efx_blinker {
- int led_num;
bool state;
bool resubmit;
struct timer_list timer;
@@ -413,8 +407,8 @@ struct efx_blinker {
* @major: Major rev. ('A', 'B' ...)
* @minor: Minor rev. (0, 1, ...)
* @init: Initialisation function
- * @init_leds: Sets up board LEDs
- * @set_fault_led: Turns the fault LED on or off
+ * @init_leds: Sets up board LEDs. May be called repeatedly.
+ * @set_id_led: Turns the identification LED on or off
* @blink: Starts/stops blinking
* @monitor: Board-specific health check function
* @fini: Cleanup function
@@ -430,9 +424,9 @@ struct efx_board {
/* As the LEDs are typically attached to the PHY, LEDs
* have a separate init callback that happens later than
* board init. */
- int (*init_leds)(struct efx_nic *efx);
+ void (*init_leds)(struct efx_nic *efx);
+ void (*set_id_led) (struct efx_nic *efx, bool state);
int (*monitor) (struct efx_nic *nic);
- void (*set_fault_led) (struct efx_nic *efx, bool state);
void (*blink) (struct efx_nic *efx, bool start);
void (*fini) (struct efx_nic *nic);
struct efx_blinker blinker;
@@ -459,6 +453,7 @@ enum phy_type {
PHY_TYPE_QT2022C2 = 4,
PHY_TYPE_PM8358 = 6,
PHY_TYPE_SFT9001A = 8,
+ PHY_TYPE_QT2025C = 9,
PHY_TYPE_SFT9001B = 10,
PHY_TYPE_MAX /* Insert any new items before this */
};
@@ -566,7 +561,7 @@ struct efx_mac_operations {
* @poll: Poll for hardware state. Serialised by the mac_lock.
* @get_settings: Get ethtool settings. Serialised by the mac_lock.
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
- * @set_xnp_advertise: Set abilities advertised in Extended Next Page
+ * @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @num_tests: Number of PHY-specific tests/results
* @test_names: Names of the tests/results
@@ -586,7 +581,7 @@ struct efx_phy_operations {
struct ethtool_cmd *ecmd);
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
- bool (*set_xnp_advertise) (struct efx_nic *efx, u32);
+ void (*set_npage_adv) (struct efx_nic *efx, u32);
u32 num_tests;
const char *const *test_names;
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
@@ -713,6 +708,8 @@ union efx_multicast_hash {
* @membase: Memory BAR value
* @biu_lock: BIU (bus interface unit) lock
* @interrupt_mode: Interrupt mode
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irq_rx_moderation: IRQ moderation time for RX event queues
* @i2c_adap: I2C adapter
* @board_info: Board-level information
* @state: Device state flag. Serialised by the rtnl_lock.
@@ -754,8 +751,7 @@ union efx_multicast_hash {
* &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
* @stats_lock: Statistics update lock. Serialises statistics fetches
- * @stats_enabled: Temporarily disable statistics fetches.
- * Serialised by @stats_lock
+ * @stats_disable_count: Nest count for disabling statistics fetches
* @mac_op: MAC interface
* @mac_address: Permanent MAC address
* @phy_type: PHY type
@@ -795,6 +791,8 @@ struct efx_nic {
void __iomem *membase;
spinlock_t biu_lock;
enum efx_int_mode interrupt_mode;
+ bool irq_rx_adaptive;
+ unsigned int irq_rx_moderation;
struct i2c_adapter i2c_adap;
struct efx_board board_info;
@@ -837,7 +835,7 @@ struct efx_nic {
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
- bool stats_enabled;
+ unsigned int stats_disable_count;
struct efx_mac_operations *mac_op;
unsigned char mac_address[ETH_ALEN];
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 58c493ef81bb..c1cff9c0c173 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -17,14 +17,17 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops;
extern struct efx_phy_operations falcon_sft9001_phy_ops;
extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
-extern void tenxpress_crc_err(struct efx_nic *efx);
+
+/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
+ * to boot due to corrupt flash, or some other negative error code. */
+extern int sft9001_wait_boot(struct efx_nic *efx);
/****************************************************************************
- * Exported functions from the driver for XFP optical PHYs
+ * AMCC/Quake QT20xx PHYs
*/
extern struct efx_phy_operations falcon_xfp_phy_ops;
-/* The QUAKE XFP PHY provides various H/W control states for LEDs */
+/* These PHYs provide various H/W control states for LEDs */
#define QUAKE_LED_LINK_INVAL (0)
#define QUAKE_LED_LINK_STAT (1)
#define QUAKE_LED_LINK_ACT (2)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index b8ba4bbad889..66d7fe3db3e6 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
}
-/**************************************************************************
- *
- * Linux generic LRO handling
- *
- **************************************************************************
- */
-
-static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
- void **tcpudp_hdr, u64 *hdr_flags, void *priv)
-{
- struct efx_channel *channel = priv;
- struct iphdr *iph;
- struct tcphdr *th;
-
- iph = (struct iphdr *)skb->data;
- if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
- goto fail;
-
- th = (struct tcphdr *)(skb->data + iph->ihl * 4);
-
- *tcpudp_hdr = th;
- *ip_hdr = iph;
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
- return 0;
-fail:
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- return -1;
-}
-
-static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
- void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
- void *priv)
-{
- struct efx_channel *channel = priv;
- struct ethhdr *eh;
- struct iphdr *iph;
-
- /* We support EtherII and VLAN encapsulated IPv4 */
- eh = page_address(frag->page) + frag->page_offset;
- *mac_hdr = eh;
-
- if (eh->h_proto == htons(ETH_P_IP)) {
- iph = (struct iphdr *)(eh + 1);
- } else {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
- if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
- goto fail;
-
- iph = (struct iphdr *)(veh + 1);
- }
- *ip_hdr = iph;
-
- /* We can only do LRO over TCP */
- if (iph->protocol != IPPROTO_TCP)
- goto fail;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
-
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
- return 0;
- fail:
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- return -1;
-}
-
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
-{
- size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
- struct net_lro_desc *lro_arr;
-
- /* Allocate the LRO descriptors structure */
- lro_arr = kzalloc(s, GFP_KERNEL);
- if (lro_arr == NULL)
- return -ENOMEM;
-
- lro_mgr->lro_arr = lro_arr;
- lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
- lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
- lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
-
- lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
- lro_mgr->get_frag_header = efx_get_frag_hdr;
- lro_mgr->dev = efx->net_dev;
-
- lro_mgr->features = LRO_F_NAPI;
-
- /* We can pass packets up with the checksum intact */
- lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-
- lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
- return 0;
-}
-
-void efx_lro_fini(struct net_lro_mgr *lro_mgr)
-{
- kfree(lro_mgr->lro_arr);
- lro_mgr->lro_arr = NULL;
-}
-
/**
* efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
*
@@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
static void efx_rx_packet_lro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf)
{
- struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
- void *priv = channel;
+ struct napi_struct *napi = &channel->napi_str;
/* Pass the skb/page into the LRO engine */
if (rx_buf->page) {
- struct skb_frag_struct frags;
+ struct napi_gro_fraginfo info;
- frags.page = rx_buf->page;
- frags.page_offset = efx_rx_buf_offset(rx_buf);
- frags.size = rx_buf->len;
+ info.frags[0].page = rx_buf->page;
+ info.frags[0].page_offset = efx_rx_buf_offset(rx_buf);
+ info.frags[0].size = rx_buf->len;
+ info.nr_frags = 1;
+ info.ip_summed = CHECKSUM_UNNECESSARY;
+ info.len = rx_buf->len;
- lro_receive_frags(lro_mgr, &frags, rx_buf->len,
- rx_buf->len, priv, 0);
+ napi_gro_frags(napi, &info);
EFX_BUG_ON_PARANOID(rx_buf->skb);
rx_buf->page = NULL;
} else {
EFX_BUG_ON_PARANOID(!rx_buf->skb);
- lro_receive_skb(lro_mgr, rx_buf->skb, priv);
+ napi_gro_receive(napi, rx_buf->skb);
rx_buf->skb = NULL;
}
}
-/* Allocate and construct an SKB around a struct page.*/
-static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
- struct efx_nic *efx,
- int hdr_len)
-{
- struct sk_buff *skb;
-
- /* Allocate an SKB to store the headers */
- skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
- if (unlikely(skb == NULL)) {
- EFX_ERR_RL(efx, "RX out of memory for skb\n");
- return NULL;
- }
-
- EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
- EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
-
- skb->len = rx_buf->len;
- skb->truesize = rx_buf->len + sizeof(struct sk_buff);
- memcpy(skb->data, rx_buf->data, hdr_len);
- skb->tail += hdr_len;
-
- /* Append the remaining page onto the frag list */
- if (unlikely(rx_buf->len > hdr_len)) {
- struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
- frag->page = rx_buf->page;
- frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
- frag->size = skb->len - hdr_len;
- skb_shinfo(skb)->nr_frags = 1;
- skb->data_len = frag->size;
- } else {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- skb->data_len = 0;
- }
-
- /* Ownership has transferred from the rx_buf to skb */
- rx_buf->page = NULL;
-
- /* Move past the ethernet header */
- skb->protocol = eth_type_trans(skb, efx->net_dev);
-
- return skb;
-}
-
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
@@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel,
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
- bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
@@ -709,41 +559,23 @@ void __efx_rx_packet(struct efx_channel *channel,
efx->net_dev);
}
- /* Both our generic-LRO and SFC-SSR support skb and page based
- * allocation, but neither support switching from one to the
- * other on the fly. If we spot that the allocation mode has
- * changed, then flush the LRO state.
- */
- if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
- efx_flush_lro(channel);
- channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
- }
- if (likely(checksummed && lro)) {
+ if (likely(checksummed || rx_buf->page)) {
efx_rx_packet_lro(channel, rx_buf);
goto done;
}
- /* Form an skb if required */
- if (rx_buf->page) {
- int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
- skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
- if (unlikely(skb == NULL)) {
- efx_free_rx_buffer(efx, rx_buf);
- goto done;
- }
- } else {
- /* We now own the SKB */
- skb = rx_buf->skb;
- rx_buf->skb = NULL;
- }
+ /* We now own the SKB */
+ skb = rx_buf->skb;
+ rx_buf->skb = NULL;
EFX_BUG_ON_PARANOID(rx_buf->page);
EFX_BUG_ON_PARANOID(rx_buf->skb);
EFX_BUG_ON_PARANOID(!skb);
/* Set the SKB flags */
- if (unlikely(!checksummed || !efx->rx_checksum_enabled))
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_record_rx_queue(skb, channel->channel);
/* Pass the packet up */
netif_receive_skb(skb);
@@ -760,7 +592,7 @@ void efx_rx_strategy(struct efx_channel *channel)
enum efx_rx_alloc_method method = rx_alloc_method;
/* Only makes sense to use page based allocation if LRO is enabled */
- if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
+ if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
/* Constrain the rx_alloc_level */
@@ -865,11 +697,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->buffer = NULL;
}
-void efx_flush_lro(struct efx_channel *channel)
-{
- lro_flush_all(&channel->lro_mgr);
-}
-
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index 0e88a9ddc1c6..42ee7555a80b 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
-void efx_lro_fini(struct net_lro_mgr *lro_mgr);
-void efx_flush_lro(struct efx_channel *channel);
void efx_rx_strategy(struct efx_channel *channel);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
void efx_rx_work(struct work_struct *data);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index dba0d64d50cd..0a598084c513 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
+ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
struct ethtool_cmd ecmd;
struct efx_channel *channel;
int rc_test = 0, rc_reset = 0, rc;
@@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_unlock(&efx->mac_lock);
/* free up all consumers of SRAM (including all the queues) */
- efx_reset_down(efx, &ecmd);
+ efx_reset_down(efx, reset_method, &ecmd);
rc = efx_test_chip(efx, tests);
if (rc && !rc_test)
rc_test = rc;
/* reset the chip to recover from the register test */
- rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL);
+ rc_reset = falcon_reset_hw(efx, reset_method);
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
- rc = efx_reset_up(efx, &ecmd, rc_reset == 0);
+ rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
if (rc && !rc_reset)
rc_reset = rc;
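
A compact sketch of the reset bracketing this hunk introduces: the offline chip test is wrapped in efx_reset_down()/efx_reset_up() using a single reset_method, so the same (invisible) reset is used for teardown, recovery and restore. efx_reset_down(), efx_test_chip(), falcon_reset_hw() and efx_reset_up() are sfc-internal helpers with the signatures shown in this patch; the helper below is illustrative only.

/* Run the register tests with the port torn down, then bring it back up
 * using the same reset method. */
static int example_offline_chip_test(struct efx_nic *efx,
                                     struct efx_self_tests *tests)
{
        enum reset_type reset_method = RESET_TYPE_INVISIBLE;
        struct ethtool_cmd ecmd;
        int rc_test, rc_reset, rc;

        efx_reset_down(efx, reset_method, &ecmd);       /* free SRAM users */
        rc_test = efx_test_chip(efx, tests);            /* register tests */
        rc_reset = falcon_reset_hw(efx, reset_method);  /* recover the chip */
        rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
        if (!rc_reset)
                rc_reset = rc;
        return rc_test ? rc_test : rc_reset;
}
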
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 16b80acb9992..4eac5da81e5a 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -24,6 +24,7 @@
*/
#include <linux/delay.h>
+#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "efx.h"
#include "phy.h"
@@ -186,19 +187,22 @@ static int sfn4111t_reset(struct efx_nic *efx)
{
efx_oword_t reg;
- /* GPIO pins are also used for I2C, so block that temporarily */
+ /* GPIO 3 and the GPIO register are shared with I2C, so block that */
mutex_lock(&efx->i2c_adap.bus_lock);
+ /* Pull RST_N (GPIO 2) low then let it up again, setting the
+ * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
+ * output enables; the output levels should always be 0 (low)
+ * and we rely on external pull-ups. */
falcon_read(efx, &reg, GPIO_CTL_REG_KER);
EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
- EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
falcon_write(efx, &reg, GPIO_CTL_REG_KER);
msleep(1000);
- EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true);
- EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true);
- EFX_SET_OWORD_FIELD(reg, GPIO3_OUT,
- !(efx->phy_mode & PHY_MODE_SPECIAL));
+ EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
+ EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
+ !!(efx->phy_mode & PHY_MODE_SPECIAL));
falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+ msleep(1);
mutex_unlock(&efx->i2c_adap.bus_lock);
@@ -232,12 +236,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
+ /* Reset the PHY, reconfigure the MAC and enable/disable
+ * MAC stats accordingly. */
efx->phy_mode = new_mode;
+ if (new_mode & PHY_MODE_SPECIAL)
+ efx_stats_disable(efx);
if (efx->board_info.type == EFX_BOARD_SFE4001)
err = sfe4001_poweron(efx);
else
err = sfn4111t_reset(efx);
efx_reconfigure_port(efx);
+ if (!(new_mode & PHY_MODE_SPECIAL))
+ efx_stats_enable(efx);
}
rtnl_unlock();
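
The pattern above (also used by tenxpress_special_reset() later in this series) is to pause MAC statistics fetches around any operation that can stop or glitch the clock the PHY feeds to the XGMAC, then resume them once the port is reconfigured. A hedged sketch, assuming the sfc-internal efx_stats_disable()/efx_stats_enable() helpers introduced here:

/* Bracket a PHY operation that may disturb the MAC clock so that no MAC
 * stats DMA is attempted while the clock is unreliable. */
static int example_glitchy_phy_op(struct efx_nic *efx,
                                  int (*op)(struct efx_nic *))
{
        int rc;

        efx_stats_disable(efx);         /* stop MAC stats fetches */
        rc = op(efx);                   /* e.g. a board/PHY reset */
        efx_reconfigure_port(efx);
        efx_stats_enable(efx);          /* safe to fetch stats again */
        return rc;
}
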
@@ -326,6 +336,11 @@ int sfe4001_init(struct efx_nic *efx)
efx->board_info.monitor = sfe4001_check_hw;
efx->board_info.fini = sfe4001_fini;
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+ * will fail. */
+ efx_stats_disable(efx);
+ }
rc = sfe4001_poweron(efx);
if (rc)
goto fail_ioexp;
@@ -372,17 +387,26 @@ static void sfn4111t_fini(struct efx_nic *efx)
i2c_unregister_device(efx->board_info.hwmon_client);
}
-static struct i2c_board_info sfn4111t_hwmon_info = {
+static struct i2c_board_info sfn4111t_a0_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
.irq = -1,
};
+static struct i2c_board_info sfn4111t_r5_hwmon_info = {
+ I2C_BOARD_INFO("max6646", 0x4d),
+ .irq = -1,
+};
+
int sfn4111t_init(struct efx_nic *efx)
{
+ int i = 0;
int rc;
efx->board_info.hwmon_client =
- i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info);
+ i2c_new_device(&efx->i2c_adap,
+ (efx->board_info.minor < 5) ?
+ &sfn4111t_a0_hwmon_info :
+ &sfn4111t_r5_hwmon_info);
if (!efx->board_info.hwmon_client)
return -EIO;
@@ -394,11 +418,20 @@ int sfn4111t_init(struct efx_nic *efx)
if (rc)
goto fail_hwmon;
- if (efx->phy_mode & PHY_MODE_SPECIAL)
- sfn4111t_reset(efx);
-
- return 0;
+ do {
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ /* PHY may not generate a 156.25 MHz clock and MAC
+ * stats fetch will fail. */
+ efx_stats_disable(efx);
+ sfn4111t_reset(efx);
+ }
+ rc = sft9001_wait_boot(efx);
+ if (rc == 0)
+ return 0;
+ efx->phy_mode = PHY_MODE_SPECIAL;
+ } while (rc == -EINVAL && ++i < 2);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
fail_hwmon:
i2c_unregister_device(efx->board_info.hwmon_client);
return rc;
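
A sketch of the boot/retry flow added to sfn4111t_init() above: if the PHY firmware fails to boot (sft9001_wait_boot() returns -EINVAL), the board is forced into FLASH_CFG_1 (PHY_MODE_SPECIAL) mode and reset once more so that new firmware can be flashed. All helpers named below are sfc-internal, with the behaviour shown in this patch; sketch only.

/* Try to boot the SFT9001; on a firmware boot failure fall back to
 * flash-config mode and retry once. */
static int example_sfn4111t_boot(struct efx_nic *efx)
{
        int tries, rc = -EINVAL;

        for (tries = 0; tries < 2; tries++) {
                if (efx->phy_mode & PHY_MODE_SPECIAL) {
                        efx_stats_disable(efx); /* PHY clock may be absent */
                        sfn4111t_reset(efx);
                }
                rc = sft9001_wait_boot(efx);
                if (rc != -EINVAL)
                        break;
                efx->phy_mode = PHY_MODE_SPECIAL;
        }
        return rc;
}
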
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 9ecb77da9545..e61dc4d4741c 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include "efx.h"
#include "mdio_10g.h"
@@ -67,6 +68,8 @@
#define PMA_PMD_EXT_CLK312_WIDTH 1
#define PMA_PMD_EXT_LPOWER_LBN 12
#define PMA_PMD_EXT_LPOWER_WIDTH 1
+#define PMA_PMD_EXT_ROBUST_LBN 14
+#define PMA_PMD_EXT_ROBUST_WIDTH 1
#define PMA_PMD_EXT_SSR_LBN 15
#define PMA_PMD_EXT_SSR_WIDTH 1
@@ -155,14 +158,16 @@
#define PCS_10GBASET_BLKLK_WIDTH 1
/* Boot status register */
-#define PCS_BOOT_STATUS_REG 53248
-#define PCS_BOOT_FATAL_ERR_LBN (0)
-#define PCS_BOOT_PROGRESS_LBN (1)
-#define PCS_BOOT_PROGRESS_WIDTH (2)
-#define PCS_BOOT_COMPLETE_LBN (3)
-
-#define PCS_BOOT_MAX_DELAY (100)
-#define PCS_BOOT_POLL_DELAY (10)
+#define PCS_BOOT_STATUS_REG 53248
+#define PCS_BOOT_FATAL_ERROR_LBN 0
+#define PCS_BOOT_PROGRESS_LBN 1
+#define PCS_BOOT_PROGRESS_WIDTH 2
+#define PCS_BOOT_PROGRESS_INIT 0
+#define PCS_BOOT_PROGRESS_WAIT_MDIO 1
+#define PCS_BOOT_PROGRESS_CHECKSUM 2
+#define PCS_BOOT_PROGRESS_JUMP 3
+#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3
+#define PCS_BOOT_CODE_STARTED_LBN 4
/* 100M/1G PHY registers */
#define GPHY_XCONTROL_REG 49152
@@ -177,35 +182,24 @@
#define C22EXT_STATUS_LINK_LBN 2
#define C22EXT_STATUS_LINK_WIDTH 1
-#define C22EXT_MSTSLV_REG 49162
-#define C22EXT_MSTSLV_1000_HD_LBN 10
-#define C22EXT_MSTSLV_1000_HD_WIDTH 1
-#define C22EXT_MSTSLV_1000_FD_LBN 11
-#define C22EXT_MSTSLV_1000_FD_WIDTH 1
+#define C22EXT_MSTSLV_CTRL 49161
+#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
+#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
+
+#define C22EXT_MSTSLV_STATUS 49162
+#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
+#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
/* Time to wait between powering down the LNPGA and turning off the power
* rails */
#define LNPGA_PDOWN_WAIT (HZ / 5)
-static int crc_error_reset_threshold = 100;
-module_param(crc_error_reset_threshold, int, 0644);
-MODULE_PARM_DESC(crc_error_reset_threshold,
- "Max number of CRC errors before XAUI reset");
-
struct tenxpress_phy_data {
enum efx_loopback_mode loopback_mode;
- atomic_t bad_crc_count;
enum efx_phy_mode phy_mode;
int bad_lp_tries;
};
-void tenxpress_crc_err(struct efx_nic *efx)
-{
- struct tenxpress_phy_data *phy_data = efx->phy_data;
- if (phy_data != NULL)
- atomic_inc(&phy_data->bad_crc_count);
-}
-
static ssize_t show_phy_short_reach(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -238,40 +232,62 @@ static ssize_t set_phy_short_reach(struct device *dev,
static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
set_phy_short_reach);
-/* Check that the C166 has booted successfully */
-static int tenxpress_phy_check(struct efx_nic *efx)
+int sft9001_wait_boot(struct efx_nic *efx)
{
- int phy_id = efx->mii.phy_id;
- int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY;
+ unsigned long timeout = jiffies + HZ + 1;
int boot_stat;
- /* Wait for the boot to complete (or not) */
- while (count) {
- boot_stat = mdio_clause45_read(efx, phy_id,
+ for (;;) {
+ boot_stat = mdio_clause45_read(efx, efx->mii.phy_id,
MDIO_MMD_PCS,
PCS_BOOT_STATUS_REG);
- if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN))
- break;
- count--;
- udelay(PCS_BOOT_POLL_DELAY);
- }
+ if (boot_stat >= 0) {
+ EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat);
+ switch (boot_stat &
+ ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
+ (3 << PCS_BOOT_PROGRESS_LBN) |
+ (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
+ (1 << PCS_BOOT_CODE_STARTED_LBN))) {
+ case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
+ (PCS_BOOT_PROGRESS_CHECKSUM <<
+ PCS_BOOT_PROGRESS_LBN)):
+ case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
+ (PCS_BOOT_PROGRESS_INIT <<
+ PCS_BOOT_PROGRESS_LBN) |
+ (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
+ return -EINVAL;
+ case ((PCS_BOOT_PROGRESS_WAIT_MDIO <<
+ PCS_BOOT_PROGRESS_LBN) |
+ (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
+ return (efx->phy_mode & PHY_MODE_SPECIAL) ?
+ 0 : -EIO;
+ case ((PCS_BOOT_PROGRESS_JUMP <<
+ PCS_BOOT_PROGRESS_LBN) |
+ (1 << PCS_BOOT_CODE_STARTED_LBN)):
+ case ((PCS_BOOT_PROGRESS_JUMP <<
+ PCS_BOOT_PROGRESS_LBN) |
+ (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
+ (1 << PCS_BOOT_CODE_STARTED_LBN)):
+ return (efx->phy_mode & PHY_MODE_SPECIAL) ?
+ -EIO : 0;
+ default:
+ if (boot_stat & (1 << PCS_BOOT_FATAL_ERROR_LBN))
+ return -EIO;
+ break;
+ }
+ }
- if (!count) {
- EFX_ERR(efx, "%s: PHY boot timed out. Last status "
- "%x\n", __func__,
- (boot_stat >> PCS_BOOT_PROGRESS_LBN) &
- ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1));
- return -ETIMEDOUT;
- }
+ if (time_after_eq(jiffies, timeout))
+ return -ETIMEDOUT;
- return 0;
+ msleep(50);
+ }
}
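
sft9001_wait_boot() above follows the usual jiffies-deadline polling idiom: compute an absolute deadline once, poll with msleep(), and give up with -ETIMEDOUT when time_after_eq() fires. A generic sketch of that idiom with a caller-supplied check() callback (an illustrative assumption, not an sfc API):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll check() until it reports completion (>0), an error (<0), or the
 * deadline passes.  Returns 0 on completion, a negative errno otherwise. */
static int example_poll_until(int (*check)(void *ctx), void *ctx,
                              unsigned int timeout_ms, unsigned int interval_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms) + 1;

        for (;;) {
                int rc = check(ctx);

                if (rc)
                        return rc > 0 ? 0 : rc;
                if (time_after_eq(jiffies, deadline))
                        return -ETIMEDOUT;
                msleep(interval_ms);
        }
}
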
static int tenxpress_init(struct efx_nic *efx)
{
int phy_id = efx->mii.phy_id;
int reg;
- int rc;
if (efx->phy_type == PHY_TYPE_SFX7101) {
/* Enable 312.5 MHz clock */
@@ -284,7 +300,9 @@ static int tenxpress_init(struct efx_nic *efx)
PMA_PMD_XCONTROL_REG);
reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
(1 << PMA_PMD_EXT_CLK_OUT_LBN) |
- (1 << PMA_PMD_EXT_CLK312_LBN));
+ (1 << PMA_PMD_EXT_CLK312_LBN) |
+ (1 << PMA_PMD_EXT_ROBUST_LBN));
+
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_XCONTROL_REG, reg);
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
@@ -292,10 +310,6 @@ static int tenxpress_init(struct efx_nic *efx)
false);
}
- rc = tenxpress_phy_check(efx);
- if (rc < 0)
- return rc;
-
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
if (efx->phy_type == PHY_TYPE_SFX7101) {
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PMAPMD,
@@ -306,7 +320,7 @@ static int tenxpress_init(struct efx_nic *efx)
PMA_PMD_LED_OVERR_REG, PMA_PMD_LED_DEFAULT);
}
- return rc;
+ return 0;
}
static int tenxpress_phy_init(struct efx_nic *efx)
@@ -346,6 +360,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = tenxpress_init(efx);
if (rc < 0)
goto fail;
+ mdio_clause45_set_pause(efx);
if (efx->phy_type == PHY_TYPE_SFT9001B) {
rc = device_create_file(&efx->pci_dev->dev,
@@ -376,8 +391,8 @@ static int tenxpress_special_reset(struct efx_nic *efx)
/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
- * requests to fail. Since we don't often special_reset, just lock. */
- spin_lock(&efx->stats_lock);
+ * requests to fail. */
+ efx_stats_disable(efx);
/* Initiate reset */
reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -392,17 +407,17 @@ static int tenxpress_special_reset(struct efx_nic *efx)
rc = mdio_clause45_wait_reset_mmds(efx,
TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
- goto unlock;
+ goto out;
/* Try and reconfigure the device */
rc = tenxpress_init(efx);
if (rc < 0)
- goto unlock;
+ goto out;
/* Wait for the XGXS state machine to churn */
mdelay(10);
-unlock:
- spin_unlock(&efx->stats_lock);
+out:
+ efx_stats_enable(efx);
return rc;
}
@@ -520,7 +535,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
struct ethtool_cmd ecmd;
- bool phy_mode_change, loop_reset, loop_toggle, loopback;
+ bool phy_mode_change, loop_reset;
if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
phy_data->phy_mode = efx->phy_mode;
@@ -531,12 +546,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
phy_data->phy_mode != PHY_MODE_NORMAL);
- loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
- loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
- if (loop_reset || loop_toggle || loopback || phy_mode_change) {
+ if (loop_reset || phy_mode_change) {
int rc;
efx->phy_op->get_settings(efx, &ecmd);
@@ -551,20 +564,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
falcon_reset_xaui(efx);
}
- if (efx->phy_type != PHY_TYPE_SFX7101) {
- /* Only change autoneg once, on coming out or
- * going into loopback */
- if (loop_toggle)
- ecmd.autoneg = !loopback;
- if (loopback) {
- ecmd.duplex = DUPLEX_FULL;
- if (efx->loopback_mode == LOOPBACK_GPHY)
- ecmd.speed = SPEED_1000;
- else
- ecmd.speed = SPEED_10000;
- }
- }
-
rc = efx->phy_op->set_settings(efx, &ecmd);
WARN_ON(rc);
}
@@ -593,15 +592,14 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
static void tenxpress_phy_poll(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
- bool change = false, link_ok;
- unsigned link_fc;
+ bool change = false;
if (efx->phy_type == PHY_TYPE_SFX7101) {
- link_ok = sfx7101_link_ok(efx);
+ bool link_ok = sfx7101_link_ok(efx);
if (link_ok != efx->link_up) {
change = true;
} else {
- link_fc = mdio_clause45_get_pause(efx);
+ unsigned int link_fc = mdio_clause45_get_pause(efx);
if (link_fc != efx->link_fc)
change = true;
}
@@ -623,13 +621,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
if (phy_data->phy_mode != PHY_MODE_NORMAL)
return;
-
- if (EFX_WORKAROUND_10750(efx) &&
- atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
- EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
- falcon_reset_xaui(efx);
- atomic_set(&phy_data->bad_crc_count, 0);
- }
}
static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -708,12 +699,10 @@ static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
struct ethtool_cmd ecmd;
int phy_id = efx->mii.phy_id;
- int rc = 0, rc2, i, res_reg;
-
- if (!(flags & ETH_TEST_FL_OFFLINE))
- return 0;
+ int rc = 0, rc2, i, ctrl_reg, res_reg;
- efx->phy_op->get_settings(efx, &ecmd);
+ if (flags & ETH_TEST_FL_OFFLINE)
+ efx->phy_op->get_settings(efx, &ecmd);
/* Initialise cable diagnostic results to unknown failure */
for (i = 1; i < 9; ++i)
@@ -721,18 +710,22 @@ static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
/* Run cable diagnostics; wait up to 5 seconds for them to complete.
* A cable fault is not a self-test failure, but a timeout is. */
+ ctrl_reg = ((1 << CDIAG_CTRL_IMMED_LBN) |
+ (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
+ if (flags & ETH_TEST_FL_OFFLINE) {
+ /* Break the link in order to run full diagnostics. We
+ * must reset the PHY to resume normal service. */
+ ctrl_reg |= (1 << CDIAG_CTRL_BRK_LINK_LBN);
+ }
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_CDIAG_CTRL_REG,
- (1 << CDIAG_CTRL_IMMED_LBN) |
- (1 << CDIAG_CTRL_BRK_LINK_LBN) |
- (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
+ PMA_PMD_CDIAG_CTRL_REG, ctrl_reg);
i = 0;
while (mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_CDIAG_CTRL_REG) &
(1 << CDIAG_CTRL_IN_PROG_LBN)) {
if (++i == 50) {
rc = -ETIMEDOUT;
- goto reset;
+ goto out;
}
msleep(100);
}
@@ -757,122 +750,92 @@ static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
results[5 + i] = len_reg;
}
- /* We must reset to exit cable diagnostic mode. The BIST will
- * also run when we do this. */
-reset:
- rc2 = tenxpress_special_reset(efx);
- results[0] = rc2 ? -1 : 1;
- if (!rc)
- rc = rc2;
-
- rc2 = efx->phy_op->set_settings(efx, &ecmd);
- if (!rc)
- rc = rc2;
+out:
+ if (flags & ETH_TEST_FL_OFFLINE) {
+ /* Reset, running the BIST and then resuming normal service. */
+ rc2 = tenxpress_special_reset(efx);
+ results[0] = rc2 ? -1 : 1;
+ if (!rc)
+ rc = rc2;
+
+ rc2 = efx->phy_op->set_settings(efx, &ecmd);
+ if (!rc)
+ rc = rc2;
+ }
return rc;
}
-static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx)
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- int phy = efx->mii.phy_id;
- u32 lpa = 0;
+ int phy_id = efx->mii.phy_id;
+ u32 adv = 0, lpa = 0;
int reg;
if (efx->phy_type != PHY_TYPE_SFX7101) {
- reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT,
- C22EXT_MSTSLV_REG);
- if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN))
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
+ C22EXT_MSTSLV_CTRL);
+ if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
+ adv |= ADVERTISED_1000baseT_Full;
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
+ C22EXT_MSTSLV_STATUS);
+ if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
lpa |= ADVERTISED_1000baseT_Half;
- if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN))
+ if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
lpa |= ADVERTISED_1000baseT_Full;
}
- reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS);
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+ MDIO_AN_10GBT_CTRL);
+ if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
+ adv |= ADVERTISED_10000baseT_Full;
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+ MDIO_AN_10GBT_STATUS);
if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
lpa |= ADVERTISED_10000baseT_Full;
- return lpa;
-}
-static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
-{
- mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full,
- tenxpress_get_xnp_lpa(efx));
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
+
+ if (efx->phy_type != PHY_TYPE_SFX7101)
+ ecmd->supported |= (SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full);
+
+ /* In loopback, the PHY automatically brings up the correct interface,
+ * but doesn't advertise the correct speed. So override it */
+ if (efx->loopback_mode == LOOPBACK_GPHY)
+ ecmd->speed = SPEED_1000;
+ else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
+ ecmd->speed = SPEED_10000;
}
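
The new tenxpress_get_settings() folds both the SFT9001 1000BASE-T master/slave registers and the clause-45 10GBASE-T AN registers into ethtool advertising masks before handing them to mdio_clause45_get_settings_ext(). A small illustrative helper for the link-partner half of that translation, using the bit definitions from this file:

/* Translate the C22 extension master/slave status register into the
 * corresponding ethtool link-partner advertising bits. */
static u32 example_decode_lp_1000(int mstslv_status)
{
        u32 lpa = 0;

        if (mstslv_status & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
                lpa |= ADVERTISED_1000baseT_Half;
        if (mstslv_status & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
                lpa |= ADVERTISED_1000baseT_Full;
        return lpa;
}
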
-static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- int phy_id = efx->mii.phy_id;
- u32 xnp_adv = 0;
- int reg;
-
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
- PMA_PMD_SPEED_ENABLE_REG);
- if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
- xnp_adv |= ADVERTISED_100baseT_Full;
- if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
- xnp_adv |= ADVERTISED_1000baseT_Full;
- if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
- xnp_adv |= ADVERTISED_10000baseT_Full;
+ if (!ecmd->autoneg)
+ return -EINVAL;
- mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
- tenxpress_get_xnp_lpa(efx));
-
- ecmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full);
-
- /* Use the vendor defined C22ext register for duplex settings */
- if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
- reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
- GPHY_XCONTROL_REG);
- ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
- DUPLEX_FULL : DUPLEX_HALF);
- }
+ return mdio_clause45_set_settings(efx, ecmd);
}
-static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
- int phy_id = efx->mii.phy_id;
- int rc;
-
- rc = mdio_clause45_set_settings(efx, ecmd);
- if (rc)
- return rc;
-
- if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
- mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
- GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
- ecmd->duplex == DUPLEX_FULL);
-
- return rc;
+ mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
+ MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
+ advertising & ADVERTISED_10000baseT_Full);
}
-static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising)
+static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
- int phy = efx->mii.phy_id;
- int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD,
- PMA_PMD_SPEED_ENABLE_REG);
- bool enabled;
-
- reg &= ~((1 << 2) | (1 << 3));
- if (EFX_WORKAROUND_13204(efx) &&
- (advertising & ADVERTISED_100baseT_Full))
- reg |= 1 << PMA_PMD_100TX_ADV_LBN;
- if (advertising & ADVERTISED_1000baseT_Full)
- reg |= 1 << PMA_PMD_1000T_ADV_LBN;
- if (advertising & ADVERTISED_10000baseT_Full)
- reg |= 1 << PMA_PMD_10000T_ADV_LBN;
- mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
- PMA_PMD_SPEED_ENABLE_REG, reg);
-
- enabled = (advertising &
- (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full));
- if (EFX_WORKAROUND_13204(efx))
- enabled |= (advertising & ADVERTISED_100baseT_Full);
- return enabled;
+ int phy_id = efx->mii.phy_id;
+
+ mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
+ C22EXT_MSTSLV_CTRL,
+ C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
+ advertising & ADVERTISED_1000baseT_Full);
+ mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
+ MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
+ advertising & ADVERTISED_10000baseT_Full);
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
@@ -882,8 +845,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.clear_interrupt = efx_port_dummy_op_void,
- .get_settings = sfx7101_get_settings,
- .set_settings = mdio_clause45_set_settings,
+ .get_settings = tenxpress_get_settings,
+ .set_settings = tenxpress_set_settings,
+ .set_npage_adv = sfx7101_set_npage_adv,
.num_tests = ARRAY_SIZE(sfx7101_test_names),
.test_names = sfx7101_test_names,
.run_tests = sfx7101_run_tests,
@@ -898,9 +862,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.clear_interrupt = efx_port_dummy_op_void,
- .get_settings = sft9001_get_settings,
- .set_settings = sft9001_set_settings,
- .set_xnp_advertise = sft9001_set_xnp_advertise,
+ .get_settings = tenxpress_get_settings,
+ .set_settings = tenxpress_set_settings,
+ .set_npage_adv = sft9001_set_npage_adv,
.num_tests = ARRAY_SIZE(sft9001_test_names),
.test_names = sft9001_test_names,
.run_tests = sft9001_run_tests,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index da3e9ff339f5..d6681edb7014 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -162,6 +162,14 @@ static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
/* Get size of the initial fragment */
len = skb_headlen(skb);
+ /* Pad if necessary */
+ if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
+ EFX_BUG_ON_PARANOID(skb->data_len);
+ len = 32 + 1;
+ if (skb_pad(skb, len - skb->len))
+ return NETDEV_TX_OK;
+ }
+
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
q_space = efx->type->txd_ring_mask - 1 - fill_level;
@@ -376,6 +384,9 @@ int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
+ if (unlikely(efx->port_inhibited))
+ return NETDEV_TX_BUSY;
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
else
@@ -397,7 +408,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
* separates the update of read_count from the test of
* stopped. */
smp_mb();
- if (unlikely(tx_queue->stopped)) {
+ if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
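
The padding added to efx_enqueue_skb() above (workaround 15592) relies on skb_pad() zero-extending a linear skb and freeing it on allocation failure, so the caller only has to report the frame as consumed. A hedged sketch of that guard in isolation:

/* Pad frames of 32 bytes or less to 33 bytes so the TX packet parser never
 * sees a truncated frame.  Returns the (possibly increased) headlen, or a
 * negative errno if the skb had to be dropped (skb_pad() already freed it). */
static int example_pad_short_skb(struct sk_buff *skb)
{
        int len = skb_headlen(skb);

        if (skb->len <= 32) {
                len = 32 + 1;
                if (skb_pad(skb, len - skb->len))
                        return -ENOMEM;
        }
        return len;
}
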
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 82e03e1d7371..c821c15445a0 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -18,8 +18,8 @@
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
-#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101)
-#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A)
+#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
+ (efx)->phy_type == PHY_TYPE_SFT9001B)
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -29,8 +29,6 @@
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX pkt parser problem with <= 16 byte TXes */
#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
-/* Low rate CRC errors require XAUI reset */
-#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
* or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@@ -38,6 +36,8 @@
#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
/* Flush events can take a very long time to appear */
#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
+/* Truncated IPv4 packets can confuse the TX packet parser */
+#define EFX_WORKAROUND_15592 EFX_WORKAROUND_ALWAYS
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
@@ -55,8 +55,8 @@
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
/* Need to send XNP pages for 100BaseT */
-#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A
-/* Need to keep AN enabled */
-#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A
+#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
+/* Don't restart AN in near-side loopback */
+#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 2d50b6ecf5f9..bb1ef77d5f56 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -7,8 +7,8 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
- * Driver for XFP optical PHYs (plus some support specific to the Quake 2022/32)
- * See www.amcc.com for details (search for qt2032)
+ * Driver for SFP+ and XFP optical PHYs plus some support specific to the
+ * AMCC QT20xx adapters; see www.amcc.com for details
*/
#include <linux/timer.h>
@@ -31,6 +31,21 @@
/* Quake-specific MDIO registers */
#define MDIO_QUAKE_LED0_REG (0xD006)
+/* QT2025C only */
+#define PCS_FW_HEARTBEAT_REG 0xd7ee
+#define PCS_FW_HEARTB_LBN 0
+#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_UC8051_STATUS_REG 0xd7fd
+#define PCS_UC_STATUS_LBN 0
+#define PCS_UC_STATUS_WIDTH 8
+#define PCS_UC_STATUS_FW_SAVE 0x20
+#define PMA_PMD_FTX_CTRL2_REG 0xc309
+#define PMA_PMD_FTX_STATIC_LBN 13
+#define PMA_PMD_VEND1_REG 0xc001
+#define PMA_PMD_VEND1_LBTXD_LBN 15
+#define PCS_VEND1_REG 0xc000
+#define PCS_VEND1_LBTXD_LBN 5
+
void xfp_set_led(struct efx_nic *p, int led, int mode)
{
int addr = MDIO_QUAKE_LED0_REG + led;
@@ -45,7 +60,49 @@ struct xfp_phy_data {
#define XFP_MAX_RESET_TIME 500
#define XFP_RESET_WAIT 10
-/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + 10 * HZ;
+ int phy_id = efx->mii.phy_id;
+ int reg, old_counter = 0;
+
+ /* Wait for firmware heartbeat to start */
+ for (;;) {
+ int counter;
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
+ PCS_FW_HEARTBEAT_REG);
+ if (reg < 0)
+ return reg;
+ counter = ((reg >> PCS_FW_HEARTB_LBN) &
+ ((1 << PCS_FW_HEARTB_WIDTH) - 1));
+ if (old_counter == 0)
+ old_counter = counter;
+ else if (counter != old_counter)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(10);
+ }
+
+ /* Wait for firmware status to look good */
+ for (;;) {
+ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
+ PCS_UC8051_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if ((reg &
+ ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >=
+ PCS_UC_STATUS_FW_SAVE)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(100);
+ }
+
+ return 0;
+}
+
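
qt2025c_wait_reset() above is built from two polling loops; the first is a change-detection wait that samples a firmware heartbeat counter once and then polls until the value differs, proving the PHY's microcontroller is executing. A generic sketch of that first phase, with a caller-supplied read_counter() callback (an assumption for illustration, not an sfc API):

/* Wait for a heartbeat counter to change.  read_counter() returns the
 * current counter value (>= 0) or a negative errno. */
static int example_wait_heartbeat(int (*read_counter)(void *ctx), void *ctx,
                                  unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
        int first = -1;

        for (;;) {
                int counter = read_counter(ctx);

                if (counter < 0)
                        return counter;         /* register read failed */
                if (first < 0)
                        first = counter;
                else if (counter != first)
                        return 0;               /* heartbeat observed */
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;
                msleep(10);
        }
}
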
+/* Reset the PHYXS MMD. This is documented (for the Quake PHYs) as doing
* a complete soft reset.
*/
static int xfp_reset_phy(struct efx_nic *efx)
@@ -58,6 +115,12 @@ static int xfp_reset_phy(struct efx_nic *efx)
if (rc < 0)
goto fail;
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0)
+ goto fail;
+ }
+
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
@@ -73,7 +136,7 @@ static int xfp_reset_phy(struct efx_nic *efx)
return rc;
fail:
- EFX_ERR(efx, "XFP: reset timed out!\n");
+ EFX_ERR(efx, "PHY reset timed out\n");
return rc;
}
@@ -88,15 +151,15 @@ static int xfp_phy_init(struct efx_nic *efx)
return -ENOMEM;
efx->phy_data = phy_data;
- EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
- " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
- MDIO_ID_REV(devid));
+ EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
+ devid, mdio_id_oui(devid), mdio_id_model(devid),
+ mdio_id_rev(devid));
phy_data->phy_mode = efx->phy_mode;
rc = xfp_reset_phy(efx);
- EFX_INFO(efx, "XFP: PHY init %s.\n",
+ EFX_INFO(efx, "PHY init %s.\n",
rc ? "failed" : "successful");
if (rc < 0)
goto fail;
@@ -131,12 +194,28 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
{
struct xfp_phy_data *phy_data = efx->phy_data;
- /* Reset the PHY when moving from tx off to tx on */
- if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
- (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
- xfp_reset_phy(efx);
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ /* There are several different register bits which can
+ * disable TX (and save power) on direct-attach cables
+ * or optical transceivers, varying somewhat between
+ * firmware versions. Only 'static mode' appears to
+ * cover everything. */
+ mdio_clause45_set_flag(
+ efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+ PMA_PMD_FTX_CTRL2_REG, PMA_PMD_FTX_STATIC_LBN,
+ efx->phy_mode & PHY_MODE_TX_DISABLED ||
+ efx->phy_mode & PHY_MODE_LOW_POWER ||
+ efx->loopback_mode == LOOPBACK_PCS ||
+ efx->loopback_mode == LOOPBACK_PMAPMD);
+ } else {
+ /* Reset the PHY when moving from tx off to tx on */
+ if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
+ (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
+ xfp_reset_phy(efx);
+
+ mdio_clause45_transmit_disable(efx);
+ }
- mdio_clause45_transmit_disable(efx);
mdio_clause45_phy_reconfigure(efx);
phy_data->phy_mode = efx->phy_mode;