author	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-03 16:29:08 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-03 16:29:08 -0700
commit	f86d1fbbe7858884d6754534a0afbb74fc30bc26 (patch)
tree	f61796870edefbe77d495e9d719c68af1d14275b /drivers/net/ipa
parent	526942b8134cc34d25d27f95dfff98b8ce2f6fcd (diff)
parent	7c6327c77d509e78bff76f2a4551fcfee851682e (diff)
Merge tag 'net-next-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking changes from Paolo Abeni:

 "Core:

   - Refactor the forward memory allocation to better cope with memory pressure with many open sockets, moving from a per-socket cache to a per-CPU one
   - Replace rwlocks with RCU for better fairness in ping, raw sockets and the IP multicast router
   - Network-side support for io_uring zero-copy send
   - A few skb drop reason improvements, including generating the string-mapping source file with codegen instead of macro magic
   - Rename reference tracking helpers to a more consistent netdev_* schema
   - Adapt the u64_stats_t type to address load/store tearing issues
   - Refine debug helper usage to reduce the log noise caused by bots

  BPF:

   - Improve socket map performance, avoiding skb cloning on read operations
   - Add support for 64-bit enums, to match types exposed by the kernel
   - Introduce support for sleepable uprobe programs
   - Introduce support for enum textual representation in libbpf
   - New helpers to implement synproxy with eBPF/XDP
   - Improve loop performance, inlining indirect calls when possible
   - Remove all the deprecated libbpf APIs
   - Implement a new eBPF-based LSM flavor
   - Add type-match support, which allows accurate queries of the types eBPF uses
   - A few TCP congestion control framework usability improvements
   - Add new infrastructure to manipulate CT entries via eBPF programs
   - Allow livepatch (KLP) and BPF trampolines to attach to the same kernel function

  Protocols:

   - Introduce per-network-namespace lookup tables for unix sockets, increasing scalability and reducing contention
   - Preparation work for Wi-Fi 7 Multi-Link Operation (MLO) support
   - Add support to forcibly close TIME_WAIT TCP sockets via user-space tools
   - Significant performance improvement for the TLS 1.3 receive path, for both zero-copy and non-zero-copy operation
   - Support for changing the initial MPTCP subflow priority/backup status
   - Introduce virtually contiguous buffers for sockets over RDMA, to cope better with memory pressure
   - Extend CAN ethtool support with timestamping capabilities
   - Refactor the CAN build infrastructure to allow building only the needed features

  Driver API:

   - Remove the devlink mutex to allow parallel commands on multiple links
   - Add support for pause stats in distributed switches
   - Implement devlink helpers to query and flash line cards
   - New helper for phy mode to register conversion

  New hardware / drivers:

   - Ethernet DSA driver for the rockchip MT7531 on the BPI-R2 Pro
   - Ethernet DSA driver for the Renesas RZ/N1 A5PSW switch
   - Ethernet DSA driver for the Microchip LAN937x switch
   - Ethernet PHY driver for the Aquantia AQR113C EPHY
   - CAN driver for the OBD-II ELM327 interface
   - CAN driver for the RZ/N1 SJA1000 CAN controller
   - Bluetooth: Infineon CYW55572 Wi-Fi plus Bluetooth combo device
  Drivers:

   - Intel Ethernet NICs:
      - i40e: add support for vlan pruning
      - i40e: add support for XDP fragmented packets
      - ice: improved vlan offload support
      - ice: add support for PPPoE offload
   - Mellanox Ethernet (mlx5):
      - refactor packet steering offload for performance and scalability
      - extend support for TC offload
      - refactor devlink code to clean up the locking schema
      - support stacked vlans for bridge offloads
      - use a TLS objects pool to improve connection rate
   - Netronome Ethernet NICs (nfp):
      - extend support for IPv6 fields mangling offload
      - add support for vepa mode in HW bridge
      - better support for virtio data path acceleration (VDPA)
      - enable TSO by default
   - Microsoft vNIC driver (mana):
      - add support for XDP redirect
   - Other Ethernet drivers:
      - bonding: add per-port priority support
      - microchip lan743x: extend phy support
      - Fungible funeth: support UDP segmentation offload and XDP xmit
      - Solarflare EF100: add support for virtual function representors
      - MediaTek SoC: add XDP support
   - Mellanox Ethernet/IB switch (mlxsw):
      - drop support for unreleased H/W (XM router)
      - improved stats accuracy
      - unified bridge model conversion improving scalability (parts 1-6)
      - support for PTP in Spectrum-2 ASICs
   - Broadcom PHYs:
      - add PTP support for BCM54210E
      - add support for the BCM53128 internal PHY
   - Marvell Ethernet switches (prestera):
      - implement support for multicast forwarding offload
   - Embedded Ethernet switches:
      - refactor the OcteonTx MAC filter for better scalability
      - improve TC H/W offload for the Felix driver
      - refactor the Microchip ksz8 and ksz9477 drivers to share the probe code (parts 1, 2), add support for phylink mac configuration
   - Other WiFi:
      - Microchip wilc1000: disable WEP support and enable WPA3
      - Atheros ath10k: encapsulation offload support

  Old code removal:

   - Neterion vxge ethernet driver: this has been untouched for more than 10 years"

* tag 'net-next-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1890 commits)
  doc: sfp-phylink: Fix a broken reference
  wireguard: selftests: support UML
  wireguard: allowedips: don't corrupt stack when detecting overflow
  wireguard: selftests: update config fragments
  wireguard: ratelimiter: use hrtimer in selftest
  net/mlx5e: xsk: Discard unaligned XSK frames on striding RQ
  net: usb: ax88179_178a: Bind only to vendor-specific interface
  selftests: net: fix IOAM test skip return code
  net: usb: make USB_RTL8153_ECM non user configurable
  net: marvell: prestera: remove reduntant code
  octeontx2-pf: Reduce minimum mtu size to 60
  net: devlink: Fix missing mutex_unlock() call
  net/tls: Remove redundant workqueue flush before destroy
  net: txgbe: Fix an error handling path in txgbe_probe()
  net: dsa: Fix spelling mistakes and cleanup code
  Documentation: devlink: add add devlink-selftests to the table of contents
  dccp: put dccp_qpolicy_full() and dccp_qpolicy_push() in the same lock
  net: ionic: fix error check for vlan flags in ionic_set_nic_features()
  net: ice: fix error NETIF_F_HW_VLAN_CTAG_FILTER check in ice_vsi_sync_fltr()
  nfp: flower: add support for tunnel offload without key ID
  ...
Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--	drivers/net/ipa/Makefile	10
-rw-r--r--	drivers/net/ipa/data/ipa_data-v3.1.c (renamed from drivers/net/ipa/ipa_data-v3.1.c)	8
-rw-r--r--	drivers/net/ipa/data/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-v3.5.1.c)	8
-rw-r--r--	drivers/net/ipa/data/ipa_data-v4.11.c (renamed from drivers/net/ipa/ipa_data-v4.11.c)	8
-rw-r--r--	drivers/net/ipa/data/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-v4.2.c)	8
-rw-r--r--	drivers/net/ipa/data/ipa_data-v4.5.c (renamed from drivers/net/ipa/ipa_data-v4.5.c)	8
-rw-r--r--	drivers/net/ipa/data/ipa_data-v4.9.c (renamed from drivers/net/ipa/ipa_data-v4.9.c)	8
-rw-r--r--	drivers/net/ipa/gsi.c	252
-rw-r--r--	drivers/net/ipa/gsi.h	26
-rw-r--r--	drivers/net/ipa/gsi_private.h	24
-rw-r--r--	drivers/net/ipa/gsi_trans.c	197
-rw-r--r--	drivers/net/ipa/gsi_trans.h	15
-rw-r--r--	drivers/net/ipa/ipa_cmd.c	8
-rw-r--r--	drivers/net/ipa/ipa_endpoint.c	27
-rw-r--r--	drivers/net/ipa/ipa_endpoint.h	4
-rw-r--r--	drivers/net/ipa/ipa_main.c	3
-rw-r--r--	drivers/net/ipa/ipa_sysfs.c	69
-rw-r--r--	drivers/net/ipa/ipa_sysfs.h	1
18 files changed, 364 insertions, 320 deletions
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index bdfb2430ab2c..8b2220eb6b92 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Qualcomm IPA driver.
+
+IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.9 4.11
+
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
@@ -7,6 +13,4 @@ ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
-ipa-y += ipa_data-v3.1.o ipa_data-v3.5.1.o \
- ipa_data-v4.2.o ipa_data-v4.5.o \
- ipa_data-v4.9.o ipa_data-v4.11.o
+ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
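[Editor's note: the static pattern substitution $(IPA_VERSIONS:%=data/ipa_data-v%.o) applies the pattern data/ipa_data-v%.o to each word of IPA_VERSIONS, so the new line expands to exactly the explicit list it replaces:

	ipa-y += data/ipa_data-v3.1.o data/ipa_data-v3.5.1.o \
		 data/ipa_data-v4.2.o data/ipa_data-v4.5.o \
		 data/ipa_data-v4.9.o data/ipa_data-v4.11.o

Supporting a new IPA version now means adding one entry to IPA_VERSIONS plus the matching source file under data/.]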
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 00f4e506e6e5..1c1895aea811 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index b7e32e87733e..58b708d2fc75 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index 1be823e5c5c2..a204e439c23d 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 683f1f91042f..04f574fe006f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 79398f286a9c..684239e71f46 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 4b96efd05cf2..2333e15f9533 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 9cfe84319ee4..9e307eebd33f 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -665,7 +665,8 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ struct gsi_ring *ring = &evt_ring->ring;
+ size_t size;
u32 val;
/* We program all event rings as GPI type/protocol */
@@ -674,6 +675,7 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ size = ring->count * GSI_RING_ELEMENT_SIZE;
val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
@@ -681,9 +683,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = lower_32_bits(evt_ring->ring.addr);
+ val = lower_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
- val = upper_32_bits(evt_ring->ring.addr);
+ val = upper_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
@@ -700,8 +702,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
- /* Finally, tell the hardware we've completed event 0 (arbitrary) */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+ /* Finally, tell the hardware our "last processed" event (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}
/* Find the transaction whose completion indicates a channel is quiesced */
@@ -720,6 +722,9 @@ static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
list = &trans_info->alloc;
if (!list_empty(list))
goto done;
+ list = &trans_info->committed;
+ if (!list_empty(list))
+ goto done;
list = &trans_info->pending;
if (!list_empty(list))
goto done;
@@ -770,9 +775,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
u32 wrr_weight = 0;
u32 val;
- /* Arbitrarily pick TRE 0 as the first channel element to use */
- channel->tre_ring.index = 0;
-
/* We program all channels as GPI type/protocol */
val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
@@ -823,7 +825,7 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
- gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ gpi->max_outstanding_tre = channel->trans_tre_max *
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
@@ -949,6 +951,8 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
+ /* Hardware assumes this is 0 following reset */
+ channel->tre_ring.index = 0;
gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
@@ -991,75 +995,66 @@ void gsi_resume(struct gsi *gsi)
enable_irq(gsi->irq);
}
-/**
- * gsi_channel_tx_queued() - Report queued TX transfers for a channel
- * @channel: Channel for which to report
- *
- * Report to the network stack the number of bytes and transactions that
- * have been queued to hardware since last call. This and the next function
- * supply information used by the network stack for throttling.
- *
- * For each channel we track the number of transactions used and bytes of
- * data those transactions represent. We also track what those values are
- * each time this function is called. Subtracting the two tells us
- * the number of bytes and transactions that have been added between
- * successive calls.
- *
- * Calling this each time we ring the channel doorbell allows us to
- * provide accurate information to the network stack about how much
- * work we've given the hardware at any point in time.
- */
-void gsi_channel_tx_queued(struct gsi_channel *channel)
+void gsi_trans_tx_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
+ channel->trans_count++;
+ channel->byte_count += trans->len;
+
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+}
+
+void gsi_trans_tx_queued(struct gsi_trans *trans)
{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
+ channel = &gsi->channel[channel_id];
+
byte_count = channel->byte_count - channel->queued_byte_count;
trans_count = channel->trans_count - channel->queued_trans_count;
channel->queued_byte_count = channel->byte_count;
channel->queued_trans_count = channel->trans_count;
- ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
/**
- * gsi_channel_tx_update() - Report completed TX transfers
- * @channel: Channel that has completed transmitting packets
- * @trans: Last transation known to be complete
- *
- * Compute the number of transactions and bytes that have been transferred
- * over a TX channel since the given transaction was committed. Report this
- * information to the network stack.
+ * gsi_trans_tx_completed() - Report completed TX transactions
+ * @trans: TX channel transaction that has completed
*
- * At the time a transaction is committed, we record its channel's
- * committed transaction and byte counts *in the transaction*.
- * Completions are signaled by the hardware with an interrupt, and
- * we can determine the latest completed transaction at that time.
+ * Report that a transaction on a TX channel has completed. At the time a
+ * transaction is committed, we record *in the transaction* its channel's
+ * committed transaction and byte counts. Transactions are completed in
+ * order, and the difference between the channel's byte/transaction count
+ * when the transaction was committed and when it completes tells us
+ * exactly how much data has been transferred while the transaction was
+ * pending.
*
- * The difference between the byte/transaction count recorded in
- * the transaction and the count last time we recorded a completion
- * tells us exactly how much data has been transferred between
- * completions.
- *
- * Calling this each time we learn of a newly-completed transaction
- * allows us to provide accurate information to the network stack
- * about how much work has been completed by the hardware at a given
- * point in time.
+ * We report this information to the network stack, which uses it to manage
+ * the rate at which data is sent to hardware.
*/
-static void
-gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
- u64 byte_count = trans->byte_count + trans->len;
- u64 trans_count = trans->trans_count + 1;
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+ trans_count = trans->trans_count - channel->compl_trans_count;
+ byte_count = trans->byte_count - channel->compl_byte_count;
- byte_count -= channel->compl_byte_count;
- channel->compl_byte_count += byte_count;
- trans_count -= channel->compl_trans_count;
channel->compl_trans_count += trans_count;
+ channel->compl_byte_count += byte_count;
- ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}
/* Channel control interrupt handler */
@@ -1327,61 +1322,73 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
}
/* Return the transaction associated with a transfer completion event */
-static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
- struct gsi_event *event)
+static struct gsi_trans *
+gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
+ u32 channel_id = event->chid;
+ struct gsi_channel *channel;
+ struct gsi_trans *trans;
u32 tre_offset;
u32 tre_index;
+ channel = &gsi->channel[channel_id];
+ if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
+ return NULL;
+
/* Event xfer_ptr records the TRE it's associated with */
tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
- return gsi_channel_trans_mapped(channel, tre_index);
+ trans = gsi_channel_trans_mapped(channel, tre_index);
+
+ if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
+ return NULL;
+
+ return trans;
}
/**
- * gsi_evt_ring_rx_update() - Record lengths of received data
- * @evt_ring: Event ring associated with channel that received packets
- * @index: Event index in ring reported by hardware
+ * gsi_evt_ring_update() - Update transaction state from hardware
+ * @gsi: GSI pointer
+ * @evt_ring_id: Event ring ID
+ * @index: Event index in ring reported by hardware
*
* Events for RX channels contain the actual number of bytes received into
* the buffer. Every event has a transaction associated with it, and here
* we update transactions to record their actual received lengths.
*
+ * When an event for a TX channel arrives we use information in the
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
+ *
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
* the first entry in need of processing. The index provided is the
* first *unfilled* event in the ring (following the last filled one).
*
* Events are sequential within the event ring, and transactions are
- * sequential within the transaction pool.
+ * sequential within the transaction array.
*
* Note that @index always refers to an element *within* the event ring.
*/
-static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
- struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- struct gsi_trans_info *trans_info;
struct gsi_event *event_done;
struct gsi_event *event;
- struct gsi_trans *trans;
- u32 trans_count = 0;
- u32 byte_count = 0;
u32 event_avail;
u32 old_index;
- trans_info = &channel->trans_info;
-
- /* We'll start with the oldest un-processed event. RX channels
- * replenish receive buffers in single-TRE transactions, so we
- * can just map that event to its transaction. Transactions
- * associated with completion events are consecutive.
+ /* Starting with the oldest un-processed event, determine which
+ * transaction (and which channel) is associated with the event.
+ * For RX channels, update each completed transaction with the
+ * number of bytes that were actually received. For TX channels
+ * associated with a network device, report to the network stack
+ * the number of transfers and bytes this completion represents.
*/
old_index = ring->index;
event = gsi_ring_virt(ring, old_index);
- trans = gsi_event_trans(channel, event);
/* Compute the number of events to process before we wrap,
* and determine when we'll be done processing events.
@@ -1389,21 +1396,28 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
event_avail = ring->count - old_index % ring->count;
event_done = gsi_ring_virt(ring, index);
do {
- trans->len = __le16_to_cpu(event->len);
- byte_count += trans->len;
- trans_count++;
+ struct gsi_trans *trans;
+
+ trans = gsi_event_trans(gsi, event);
+ if (!trans)
+ return;
+
+ if (trans->direction == DMA_FROM_DEVICE)
+ trans->len = __le16_to_cpu(event->len);
+ else
+ gsi_trans_tx_completed(trans);
+
+ gsi_trans_move_complete(trans);
/* Move on to the next event and transaction */
if (--event_avail)
event++;
else
event = gsi_ring_virt(ring, 0);
- trans = gsi_trans_pool_next(&trans_info->pool, trans);
} while (event != event_done);
- /* We record RX bytes when they are received */
- channel->byte_count += byte_count;
- channel->trans_count += trans_count;
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
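[Editor's note: a worked example of the wrap logic above, assuming ring->count is 16: if old_index is 14 and the hardware reports index 3, event_avail starts at 2 (16 - 14 % 16). Events 14 and 15 are processed by advancing the event pointer; when event_avail reaches zero the walk restarts at gsi_ring_virt(ring, 0) and continues through events 0, 1 and 2, stopping when the pointer equals event_done. Five transactions are updated and a single doorbell write then acknowledges all five events.]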
/* Initialize a ring, including allocating DMA memory for its entries */
@@ -1423,6 +1437,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
ring->addr = addr;
ring->count = count;
+ ring->index = 0;
return 0;
}
@@ -1493,22 +1508,16 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
return NULL;
/* Get the transaction for the latest completed event. */
- trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
+ trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
+ if (!trans)
+ return NULL;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
* the number of transactions and bytes this completion represents
* up the network stack.
*/
- if (channel->toward_ipa)
- gsi_channel_tx_update(channel, trans);
- else
- gsi_evt_ring_rx_update(evt_ring, index);
-
- gsi_trans_move_complete(trans);
-
- /* Tell the hardware we've handled these events */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
+ gsi_evt_ring_update(gsi, evt_ring_id, index);
return gsi_channel_trans_complete(channel);
}
@@ -2001,9 +2010,10 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-static bool gsi_channel_data_valid(struct gsi *gsi,
+static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
const struct ipa_gsi_endpoint_data *data)
{
+ const struct gsi_channel_data *channel_data;
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2019,10 +2029,24 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
return false;
}
- if (!data->channel.tlv_count ||
- data->channel.tlv_count > GSI_TLV_MAX) {
+ if (command && !data->toward_ipa) {
+ dev_err(dev, "command channel %u is not TX\n", channel_id);
+ return false;
+ }
+
+ channel_data = &data->channel;
+
+ if (!channel_data->tlv_count ||
+ channel_data->tlv_count > GSI_TLV_MAX) {
dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
- channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ channel_id, channel_data->tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
+ dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
+ channel_id, IPA_COMMAND_TRANS_TRE_MAX,
+ channel_data->tlv_count);
return false;
}
@@ -2031,22 +2055,22 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
* gsi_channel_tre_max() is computed, tre_count has to be almost
* twice the TLV FIFO size to satisfy this requirement.
*/
- if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
- channel_id, data->channel.tlv_count,
- data->channel.tre_count);
+ channel_id, channel_data->tlv_count,
+ channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.tre_count)) {
+ if (!is_power_of_2(channel_data->tre_count)) {
dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
- channel_id, data->channel.tre_count);
+ channel_id, channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.event_count)) {
+ if (!is_power_of_2(channel_data->event_count)) {
dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
- channel_id, data->channel.event_count);
+ channel_id, channel_data->event_count);
return false;
}
@@ -2062,7 +2086,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
u32 tre_count;
int ret;
- if (!gsi_channel_data_valid(gsi, data))
+ if (!gsi_channel_data_valid(gsi, command, data))
return -EINVAL;
/* Worst case we need an event for every outstanding TRE */
@@ -2080,7 +2104,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->tlv_count = data->channel.tlv_count;
+ channel->trans_tre_max = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -2295,13 +2319,5 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
/* Hardware limit is channel->tre_count - 1 */
- return channel->tre_count - (channel->tlv_count - 1);
-}
-
-/* Returns the maximum number of TREs in a single transaction for a channel */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
-{
- struct gsi_channel *channel = &gsi->channel[channel_id];
-
- return channel->tlv_count;
+ return channel->tre_count - (channel->trans_tre_max - 1);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 5d66116b46b0..23de5f67374c 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -48,12 +48,13 @@ struct gsi_ring {
*
* A channel ring consists of TRE entries filled by the AP and passed
* to the hardware for processing. For a channel ring, the ring index
- * identifies the next unused entry to be filled by the AP.
+ * identifies the next unused entry to be filled by the AP. In this
+ * case the initial value is assumed by hardware to be 0.
*
* An event ring consists of event structures filled by the hardware
* and passed to the AP. For event rings, the ring index identifies
* the next ring entry that is not known to have been filled by the
- * hardware.
+ * hardware. The initial value used is arbitrary (so we use 0).
*/
u32 index;
};
@@ -82,13 +83,15 @@ struct gsi_trans_pool {
struct gsi_trans_info {
atomic_t tre_avail; /* TREs available for allocation */
struct gsi_trans_pool pool; /* transaction pool */
+ struct gsi_trans **map; /* TRE -> transaction map */
+
struct gsi_trans_pool sg_pool; /* scatterlist pool */
struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
- struct gsi_trans **map; /* TRE -> transaction map */
spinlock_t spinlock; /* protects updates to the lists */
struct list_head alloc; /* allocated, not committed */
- struct list_head pending; /* committed, awaiting completion */
+ struct list_head committed; /* committed, awaiting doorbell */
+ struct list_head pending; /* pending, awaiting completion */
struct list_head complete; /* completed, awaiting poll */
struct list_head polled; /* returned by gsi_channel_poll_one() */
};
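[Editor's note: taken together, the lists describe a fixed lifecycle for every transaction (list names as above; sketch added for clarity):

	alloc      --(commit fills TREs)-->      committed
	committed  --(doorbell rung)-->          pending
	pending    --(completion event)-->       complete
	complete   --(gsi_channel_poll_one())--> polled

Splitting the old pending list into committed and pending captures the fact that the doorbell may be deferred: TREs can be filled for a transaction before the hardware has been told about them.]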
@@ -110,16 +113,16 @@ struct gsi_channel {
bool toward_ipa;
bool command; /* AP command TX channel or not */
- u8 tlv_count; /* # entries in TLV FIFO */
+ u8 trans_tre_max; /* max TREs in a transaction */
u16 tre_count;
u16 event_count;
struct gsi_ring tre_ring;
u32 evt_ring_id;
+ /* The following counts are used only for TX endpoints */
u64 byte_count; /* total # bytes transferred */
u64 trans_count; /* total # transactions */
- /* The following counts are used only for TX endpoints */
u64 queued_byte_count; /* last reported queued byte count */
u64 queued_trans_count; /* ...and queued trans count */
u64 compl_byte_count; /* last reported completed byte count */
@@ -184,20 +187,11 @@ void gsi_teardown(struct gsi *gsi);
* @gsi: GSI pointer
* @channel_id: Channel whose limit is to be returned
*
- * Return: The maximum number of TREs oustanding on the channel
+ * Return: The maximum number of TREs outstanding on the channel
*/
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
/**
- * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
- * @gsi: GSI pointer
- * @channel_id: Channel whose limit is to be returned
- *
- * Return: The maximum TRE count per transaction on the channel
- */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
-
-/**
* gsi_channel_start() - Start an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to start
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index ea333a244cf5..0b2516fa21b5 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -16,9 +16,6 @@ struct gsi_channel;
#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
-/* Return the entry that follows one provided in a transaction pool */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
-
/**
* gsi_trans_move_complete() - Mark a GSI transaction completed
* @trans: Transaction to commit
@@ -105,14 +102,21 @@ void gsi_channel_doorbell(struct gsi_channel *channel);
void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
/**
- * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
- * @channel: Channel whose bytes have been queued
+ * gsi_trans_tx_committed() - Record bytes committed for transmit
+ * @trans: TX endpoint transaction being committed
+ *
+ * Report that a TX transaction has been committed. It updates some
+ * statistics used to manage transmit rates.
+ */
+void gsi_trans_tx_committed(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_tx_queued() - Report a queued TX channel transaction
+ * @trans: Transaction being passed to hardware
*
- * This arranges for the the number of transactions and bytes for
- * transfer that have been queued to hardware to be reported. It
- * passes this information up the network stack so it can be used to
- * throttle transmissions.
+ * Report to the network stack that a TX transaction is being supplied
+ * to the hardware.
*/
-void gsi_channel_tx_queued(struct gsi_channel *channel);
+void gsi_trans_tx_queued(struct gsi_trans *trans);
#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 55f8fe7d2668..18e7e8c405be 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -214,26 +214,14 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
return pool->base + offset;
}
-/* Return the pool element that immediately follows the one given.
- * This only works done if elements are allocated one at a time.
- */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+/* Map a TRE ring entry index to the transaction it is associated with */
+static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
- void *end = pool->base + pool->count * pool->size;
-
- WARN_ON(element < pool->base);
- WARN_ON(element >= end);
- WARN_ON(pool->max_alloc != 1);
-
- element += pool->size;
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- return element < end ? element : pool->base;
-}
+ /* The completion event will indicate the last TRE used */
+ index += trans->used_count - 1;
-/* Map a given ring entry index to the transaction associated with it */
-static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
- struct gsi_trans *trans)
-{
/* Note: index *must* be used modulo the ring count here */
channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
@@ -253,15 +241,31 @@ struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
struct gsi_trans, links);
}
-/* Move a transaction from the allocated list to the pending list */
+/* Move a transaction from the allocated list to the committed list */
+static void gsi_trans_move_committed(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->committed);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move transactions from the committed list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct list_head list;
spin_lock_bh(&trans_info->spinlock);
- list_move_tail(&trans->links, &trans_info->pending);
+ /* Move this transaction and all predecessors to the pending list */
+ list_cut_position(&list, &trans_info->committed, &trans->links);
+ list_splice_tail(&list, &trans_info->pending);
spin_unlock_bh(&trans_info->spinlock);
}
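[Editor's note: list_cut_position() detaches everything from the head of the committed list up to and including trans onto the local list, and list_splice_tail() appends that sublist to pending. Ordering is preserved, and the deferred-doorbell case is handled naturally: when the doorbell is finally rung for trans, every transaction committed before it moves to pending in the same operation.]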
@@ -340,7 +344,7 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
- if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
+ if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
@@ -351,14 +355,14 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the the transaction */
+ /* Allocate and initialize non-zero fields in the transaction */
trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
trans->gsi = gsi;
trans->channel_id = channel_id;
- trans->tre_count = tre_count;
+ trans->rsvd_count = tre_count;
init_completion(&trans->completion);
- /* Allocate the scatterlist and (if requested) info entries. */
+ /* Allocate the scatterlist */
trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
sg_init_marker(trans->sgl, tre_count);
@@ -400,22 +404,23 @@ void gsi_trans_free(struct gsi_trans *trans)
if (!last)
return;
- ipa_gsi_trans_release(trans);
+ if (trans->used_count)
+ ipa_gsi_trans_release(trans);
/* Releasing the reserved TREs implicitly frees the sgl[] and
* (if present) info[] arrays, plus the transaction itself.
*/
- gsi_trans_tre_release(trans_info, trans->tre_count);
+ gsi_trans_tre_release(trans_info, trans->rsvd_count);
}
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
- u32 which = trans->used++;
+ u32 which = trans->used_count++;
struct scatterlist *sg;
- WARN_ON(which >= trans->tre_count);
+ WARN_ON(which >= trans->rsvd_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -446,9 +451,9 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
sg_set_page(sg, page, size, offset);
@@ -456,7 +461,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
if (!ret)
return -ENOMEM;
- trans->used++; /* Transaction now owns the (DMA mapped) page */
+ trans->used_count++; /* Transaction now owns the (DMA mapped) page */
return 0;
}
@@ -465,25 +470,26 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
struct scatterlist *sg = &trans->sgl[0];
- u32 used;
+ u32 used_count;
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (ret < 0)
return ret;
- used = ret;
+ used_count = ret;
- ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
if (!ret)
return -ENOMEM;
- trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+ /* Transaction now owns the (DMA mapped) skb */
+ trans->used_count += used_count;
return 0;
}
@@ -549,7 +555,7 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
struct gsi_tre *dest_tre;
@@ -559,7 +565,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
u32 avail;
u32 i;
- WARN_ON(!trans->used);
+ WARN_ON(!trans->used_count);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
@@ -567,43 +573,39 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
* transfer request, whose opcode is IPA_CMD_NONE.
*/
cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
- avail = ring->count - ring->index % ring->count;
- dest_tre = gsi_ring_virt(ring, ring->index);
- for_each_sg(trans->sgl, sg, trans->used, i) {
- bool last_tre = i == trans->used - 1;
+ avail = tre_ring->count - tre_ring->index % tre_ring->count;
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
+ for_each_sg(trans->sgl, sg, trans->used_count, i) {
+ bool last_tre = i == trans->used_count - 1;
dma_addr_t addr = sg_dma_address(sg);
u32 len = sg_dma_len(sg);
byte_count += len;
if (!avail--)
- dest_tre = gsi_ring_virt(ring, 0);
+ dest_tre = gsi_ring_virt(tre_ring, 0);
if (cmd_opcode)
opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
}
- ring->index += trans->used;
-
- if (channel->toward_ipa) {
- /* We record TX bytes when they are sent */
- trans->len = byte_count;
- trans->trans_count = channel->trans_count;
- trans->byte_count = channel->byte_count;
- channel->trans_count++;
- channel->byte_count += byte_count;
- }
+ /* Associate the TRE with the transaction */
+ gsi_trans_map(trans, tre_ring->index);
- /* Associate the last TRE with the transaction */
- gsi_channel_trans_map(channel, ring->index - 1, trans);
+ tre_ring->index += trans->used_count;
- gsi_trans_move_pending(trans);
+ trans->len = byte_count;
+ if (channel->toward_ipa)
+ gsi_trans_tx_committed(trans);
+
+ gsi_trans_move_committed(trans);
/* Ring doorbell if requested, or if all TREs are allocated */
if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
/* Report what we're handing off to hardware for TX channels */
if (channel->toward_ipa)
- gsi_channel_tx_queued(channel);
+ gsi_trans_tx_queued(trans);
+ gsi_trans_move_pending(trans);
gsi_channel_doorbell(channel);
}
}
@@ -611,7 +613,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
- if (trans->used)
+ if (trans->used_count)
__gsi_trans_commit(trans, ring_db);
else
gsi_trans_free(trans);
@@ -620,7 +622,7 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
- if (!trans->used)
+ if (!trans->used_count)
goto out_trans_free;
refcount_inc(&trans->refcount);
@@ -638,7 +640,7 @@ void gsi_trans_complete(struct gsi_trans *trans)
{
/* If the entire SGL was mapped when added, unmap it now */
if (trans->direction != DMA_NONE)
- dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
trans->direction);
ipa_gsi_trans_complete(trans);
@@ -675,7 +677,7 @@ void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
struct gsi_trans_info *trans_info;
struct gsi_tre *dest_tre;
@@ -685,12 +687,12 @@ int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
if (!gsi_trans_tre_reserve(trans_info, 1))
return -EBUSY;
- /* Now fill the the reserved TRE and tell the hardware */
+ /* Now fill the reserved TRE and tell the hardware */
- dest_tre = gsi_ring_virt(ring, ring->index);
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
- ring->index++;
+ tre_ring->index++;
gsi_channel_doorbell(channel);
return 0;
@@ -708,6 +710,7 @@ void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 tre_count = channel->tre_count;
struct gsi_trans_info *trans_info;
u32 tre_max;
int ret;
@@ -715,68 +718,66 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
/* Ensure the size of a channel element is what's expected */
BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
- /* The map array is used to determine what transaction is associated
- * with a TRE that the hardware reports has completed. We need one
- * map entry per TRE.
- */
trans_info = &channel->trans_info;
- trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
- GFP_KERNEL);
- if (!trans_info->map)
- return -ENOMEM;
- /* We can't use more TREs than there are available in the ring.
- * This limits the number of transactions that can be oustanding.
- * Worst case is one TRE per transaction (but we actually limit
- * it to something a little less than that). We allocate resources
- * for transactions (including transaction structures) based on
- * this maximum number.
+ /* The tre_avail field is what ultimately limits the number of
+ * outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient
+ * for what the transaction might need.
*/
tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+ atomic_set(&trans_info->tre_avail, tre_max);
- /* Transactions are allocated one at a time. */
+ /* We can't use more TREs than the number available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). By allocating a
+ * power-of-two number of transactions we can use an index
+ * modulo that number to determine the next one that's free.
+ * Transactions are allocated one at a time.
+ */
ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
tre_max, 1);
if (ret)
- goto err_kfree;
+ return -ENOMEM;
+
+ /* A completion event contains a pointer to the TRE that caused
+ * the event (which will be the last one used by the transaction).
+ * Each entry in this map records the transaction associated
+ * with a corresponding completed TRE.
+ */
+ trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map) {
+ ret = -ENOMEM;
+ goto err_trans_free;
+ }
/* A transaction uses a scatterlist array to represent the data
* transfers implemented by the transaction. Each scatterlist
* element is used to fill a single TRE when the transaction is
* committed. So we need as many scatterlist elements as the
* maximum number of TREs that can be outstanding.
- *
- * All TREs in a transaction must fit within the channel's TLV FIFO.
- * A transaction on a channel can allocate as many TREs as that but
- * no more.
*/
ret = gsi_trans_pool_init(&trans_info->sg_pool,
sizeof(struct scatterlist),
- tre_max, channel->tlv_count);
+ tre_max, channel->trans_tre_max);
if (ret)
- goto err_trans_pool_exit;
-
- /* Finally, the tre_avail field is what ultimately limits the number
- * of outstanding transactions and their resources. A transaction
- * allocation succeeds only if the TREs available are sufficient for
- * what the transaction might need. Transaction resource pools are
- * sized based on the maximum number of outstanding TREs, so there
- * will always be resources available if there are TREs available.
- */
- atomic_set(&trans_info->tre_avail, tre_max);
+ goto err_map_free;
spin_lock_init(&trans_info->spinlock);
INIT_LIST_HEAD(&trans_info->alloc);
+ INIT_LIST_HEAD(&trans_info->committed);
INIT_LIST_HEAD(&trans_info->pending);
INIT_LIST_HEAD(&trans_info->complete);
INIT_LIST_HEAD(&trans_info->polled);
return 0;
-err_trans_pool_exit:
- gsi_trans_pool_exit(&trans_info->pool);
-err_kfree:
+err_map_free:
kfree(trans_info->map);
+err_trans_free:
+ gsi_trans_pool_exit(&trans_info->pool);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 020c3b32de1d..7084507830c2 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -33,9 +33,9 @@ struct gsi_trans_pool;
* @gsi: GSI pointer
* @channel_id: Channel number transaction is associated with
* @cancelled: If set by the core code, transaction was cancelled
- * @tre_count: Number of TREs reserved for this transaction
- * @used: Number of TREs *used* (could be less than tre_count)
- * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @rsvd_count: Number of TREs reserved for this transaction
+ * @used_count: Number of TREs *used* (could be less than rsvd_count)
+ * @len: Number of bytes sent or received by the transaction
* @data: Preserved but not touched by the core transaction code
* @cmd_opcode: Array of command opcodes (command channel only)
* @sgl: An array of scatter/gather entries managed by core code
@@ -45,8 +45,9 @@ struct gsi_trans_pool;
* @byte_count: TX channel byte count recorded when transaction committed
* @trans_count: Channel transaction count when committed (for BQL accounting)
*
- * The size used for some fields in this structure were chosen to ensure
- * the full structure size is no larger than 128 bytes.
+ * The @len field is set when the transaction is committed. For RX
+ * transactions it is updated later to reflect the actual number of bytes
+ * received.
*/
struct gsi_trans {
struct list_head links; /* gsi_channel lists */
@@ -56,8 +57,8 @@ struct gsi_trans {
bool cancelled; /* true if transaction was cancelled */
- u8 tre_count; /* # TREs requested */
- u8 used; /* # entries used in sgl[] */
+ u8 rsvd_count; /* # TREs requested */
+ u8 used_count; /* # entries used in sgl[] */
u32 len; /* total # bytes across sgl[] */
union {
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index e58cd4478fd3..6dea40259b60 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -353,13 +353,13 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
- /* Even though command payloads are allocated one at a time,
- * a single transaction can require up to tlv_count of them,
- * so we treat them as if that many can be allocated at once.
+ /* Command payloads are allocated one at a time, but a single
+ * transaction can require up to the maximum supported by the
+ * channel; treat them as if they were allocated all at once.
*/
return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
sizeof(union ipa_cmd_payload),
- tre_max, channel->tlv_count);
+ tre_max, channel->trans_tre_max);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index d3b3255ac3d1..66d2bfdf9e42 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1020,7 +1020,7 @@ int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
* If not, see if we can linearize it before giving up.
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (nr_frags > endpoint->skb_frag_max) {
if (skb_linearize(skb))
return -E2BIG;
nr_frags = 0;
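[Editor's note: the renamed field reflects what the limit actually bounds. A transaction needs one TRE for the skb's linear data plus one per page fragment, so an skb fits exactly when nr_frags <= skb_frag_max, which is equivalent to the old 1 + nr_frags > trans_tre_max test (skb_frag_max is set to trans_tre_max - 1 later in this patch). An skb with too many fragments is linearized into a single buffer, after which it needs just one TRE.]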
@@ -1368,18 +1368,14 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
}
}
-/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
-static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
-}
-
-/* Complete transaction initiated in ipa_endpoint_replenish_one() */
-static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
struct page *page;
+ if (endpoint->toward_ipa)
+ return;
+
if (trans->cancelled)
goto done;
@@ -1393,15 +1389,6 @@ done:
ipa_endpoint_replenish(endpoint);
}
-void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
- if (endpoint->toward_ipa)
- ipa_endpoint_tx_complete(endpoint, trans);
- else
- ipa_endpoint_rx_complete(endpoint, trans);
-}
-
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct gsi_trans *trans)
{
@@ -1721,7 +1708,7 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
if (endpoint->ee_id != GSI_EE_AP)
return;
- endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
if (!endpoint->toward_ipa) {
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 01790c60bee8..28e0a7386fd7 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -142,7 +142,7 @@ enum ipa_replenish_flag {
* @endpoint_id: IPA endpoint number
* @toward_ipa: Endpoint direction (true = TX, false = RX)
* @config: Default endpoint configuration
- * @trans_tre_max: Maximum number of TRE descriptors per transaction
+ * @skb_frag_max: Maximum allowed number of TX SKB fragments
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
* @replenish_flags: Replenishing state flags
@@ -157,7 +157,7 @@ struct ipa_endpoint {
bool toward_ipa;
struct ipa_endpoint_config config;
- u32 trans_tre_max;
+ u32 skb_frag_max; /* Used for netdev TX only */
u32 evt_ring_id;
/* Net device this endpoint is associated with, if any */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 3757ce3de2c5..32962d885acd 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -836,6 +836,8 @@ out_power_put:
kfree(ipa);
ipa_power_exit(power);
+ dev_info(dev, "IPA driver removed");
+
return 0;
}
@@ -851,6 +853,7 @@ static void ipa_shutdown(struct platform_device *pdev)
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,
+ &ipa_endpoint_id_attribute_group,
&ipa_modem_attribute_group,
NULL,
};
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index ff61dbdd70d8..c0c8641cdd14 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -96,38 +96,71 @@ const struct attribute_group ipa_feature_attribute_group = {
.attrs = ipa_feature_attrs,
};
-static ssize_t
-ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+static umode_t ipa_endpoint_id_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+ struct ipa *ipa = dev_get_drvdata(kobj_to_dev(kobj));
+ struct device_attribute *dev_attr;
+ struct dev_ext_attribute *ea;
+ bool visible;
+
+ /* An endpoint id attribute is only visible if it's defined */
+ dev_attr = container_of(attr, struct device_attribute, attr);
+ ea = container_of(dev_attr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+ visible = !!ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
+
+ return visible ? attr->mode : 0;
}
-static ssize_t rx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t endpoint_id_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ struct ipa_endpoint *endpoint;
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(attr, struct dev_ext_attribute, attr);
+ endpoint = ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+ return sysfs_emit(buf, "%u\n", endpoint->endpoint_id);
}
-static DEVICE_ATTR_RO(rx_endpoint_id);
+#define ENDPOINT_ID_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_endpoint_id_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
-static ssize_t tx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ipa *ipa = dev_get_drvdata(dev);
+ENDPOINT_ID_ATTR(modem_rx, IPA_ENDPOINT_AP_MODEM_RX);
+ENDPOINT_ID_ATTR(modem_tx, IPA_ENDPOINT_AP_MODEM_TX);
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
-}
+static struct attribute *ipa_endpoint_id_attrs[] = {
+ &dev_attr_endpoint_id_modem_rx.attr.attr,
+ &dev_attr_endpoint_id_modem_tx.attr.attr,
+ NULL
+};
-static DEVICE_ATTR_RO(tx_endpoint_id);
+const struct attribute_group ipa_endpoint_id_attribute_group = {
+ .name = "endpoint_id",
+ .is_visible = ipa_endpoint_id_is_visible,
+ .attrs = ipa_endpoint_id_attrs,
+};
+
+/* Reuse endpoint ID attributes for the legacy modem endpoint IDs */
+#define MODEM_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_modem_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
+
+MODEM_ATTR(rx_endpoint_id, IPA_ENDPOINT_AP_MODEM_RX);
+MODEM_ATTR(tx_endpoint_id, IPA_ENDPOINT_AP_MODEM_TX);
static struct attribute *ipa_modem_attrs[] = {
- &dev_attr_rx_endpoint_id.attr,
- &dev_attr_tx_endpoint_id.attr,
- NULL
+ &dev_attr_modem_rx_endpoint_id.attr.attr,
+ &dev_attr_modem_tx_endpoint_id.attr.attr,
+ NULL,
};
const struct attribute_group ipa_modem_attribute_group = {
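[Editor's note: with both groups registered, the endpoint IDs appear twice under the IPA device's sysfs directory, once under the new endpoint_id group and once under the legacy names (assuming ipa_modem_attribute_group is named "modem"; the platform device path varies by SoC):

	.../ipa/endpoint_id/modem_rx
	.../ipa/endpoint_id/modem_tx
	.../ipa/modem/rx_endpoint_id
	.../ipa/modem/tx_endpoint_id

Reading any of these returns the decimal endpoint number; entries whose endpoint is not defined for the SoC are hidden by the is_visible callback.]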
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index b34e5650bf8c..4a3ffd1e4e3f 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -10,6 +10,7 @@ struct attribute_group;
extern const struct attribute_group ipa_attribute_group;
extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_endpoint_id_attribute_group;
extern const struct attribute_group ipa_modem_attribute_group;
#endif /* _IPA_SYSFS_H_ */