Diffstat (limited to 'drivers/net/wireless/ath/ath12k/dp.c')
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c  |  83  +++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------
1 file changed, 51 insertions(+), 32 deletions(-)
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 7843c76a82c1..61aa78d8bd8c 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -132,7 +132,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
enum hal_ring_type type, int ring_num)
{
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
const u8 *grp_mask;
+ int i;
switch (type) {
case HAL_WBM2SW_RELEASE:
@@ -140,6 +142,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ if (ring_num == map[i].wbm_ring_num) {
+ ring_num = i;
+ break;
+ }
+ }
+
grp_mask = &ab->hw_params->ring_mask->tx[0];
}
break;
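The tx group mask is indexed by TCL ring index, while HAL_WBM2SW_RELEASE callers pass in the WBM ring number, so the added loop translates one into the other through tcl_to_wbm_rbm_map before the mask lookup. A minimal standalone sketch of that reverse lookup, with hypothetical map contents rather than any real chip's hw_params:

#include <stdio.h>

/* Hypothetical sketch: translate a WBM completion ring number back to
 * its TCL (tx) ring index, as the loop added above does.
 */
struct rbm_map_entry {
	int wbm_ring_num;
};

static int wbm_to_tcl_index(const struct rbm_map_entry *map, int max_tx_ring,
			    int wbm_ring_num)
{
	int i;

	for (i = 0; i < max_tx_ring; i++)
		if (map[i].wbm_ring_num == wbm_ring_num)
			return i;

	return wbm_ring_num;	/* no match: keep the caller's value */
}

int main(void)
{
	/* hypothetical mapping: TCL rings 0/1/2 complete on WBM rings 0/2/4 */
	const struct rbm_map_entry map[] = { {0}, {2}, {4} };

	printf("%d\n", wbm_to_tcl_index(map, 3, 4));	/* prints 2 */
	return 0;
}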
@@ -457,8 +467,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
}
- ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
@@ -479,20 +487,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
goto err;
}
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
- DP_TCL_CMD_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
- goto err;
- }
-
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
- 0, 0, DP_TCL_STATUS_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
- goto err;
- }
-
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
@@ -616,6 +610,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
int i;
int ret = 0;
u32 end_offset, cookie;
+ enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -646,7 +641,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+ paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
@@ -790,6 +786,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 paddr;
int i, ret;
u32 cookie;
+ enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -850,8 +847,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(desc,
- cookie, paddr);
+ ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
@@ -881,11 +877,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
- while (i < ab->hw_params->max_tx_ring) {
- if (ab->hw_params->ring_mask->tx[grp_id] &
- BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
- ath12k_dp_tx_completion_handler(ab, i);
- i++;
+ if (ab->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_dp_tx_completion_handler(ab, i);
}
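The rewritten tx completion path assumes a single bit is set in the per-group tx mask and derives the ring index with fls(), which is 1-based (fls(0x1) == 1, fls(0x4) == 3). A standalone sketch of that arithmetic, using a local fls() equivalent in place of the kernel helper:

#include <stdio.h>

/* Local stand-in for the kernel's fls(): 1-based index of the highest
 * set bit, 0 when no bit is set.
 */
static int fls_local(unsigned int x)
{
	int pos = 0;

	while (x) {
		x >>= 1;
		pos++;
	}
	return pos;
}

int main(void)
{
	unsigned int tx_mask = 0x4;	/* hypothetical group mask: only ring 2 set */

	printf("ring = %d\n", fls_local(tx_mask) - 1);	/* prints "ring = 2" */
	return 0;
}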
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
@@ -921,8 +915,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -942,8 +936,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -1031,7 +1025,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t)
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
@@ -1355,13 +1349,14 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
+ struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info **desc_addr_ptr;
u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET;
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
if (ppt_idx < start_ppt_idx ||
@@ -1369,6 +1364,7 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
+ ppt_idx = ppt_idx - dp->rx_ppt_base;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
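With rx_ppt_base added, the RX cookie carries a device-global PPT index: the lookup first validates it against this device's window and then strips the base to recover the local page index before dereferencing the pointer table. A worked sketch with hypothetical constants (the real values live in dp.h):

#include <stdio.h>

/* Hypothetical values for illustration; not the real dp.h constants. */
#define NUM_RX_SPT_PAGES	256
#define RX_SPT_PAGE_OFFSET	10

int main(void)
{
	unsigned int device_id = 1;
	unsigned int rx_ppt_base = device_id * NUM_RX_SPT_PAGES;	/* 256 */
	unsigned int ppt_idx = 300;		/* global index taken from the cookie */

	unsigned int start = rx_ppt_base + RX_SPT_PAGE_OFFSET;		/* 266 */
	unsigned int end = start + NUM_RX_SPT_PAGES;			/* 522 */

	if (ppt_idx < start || ppt_idx >= end) {
		printf("cookie outside this device's window\n");
		return 1;
	}

	printf("local ppt_idx = %u\n", ppt_idx - rx_ppt_base);	/* prints 44 */
	return 0;
}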
@@ -1403,7 +1399,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 i, j, pool_id, tx_spt_page;
- u32 ppt_idx;
+ u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
@@ -1418,10 +1414,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
}
ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->spt_info->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
@@ -1482,6 +1479,7 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
end = start + ATH12K_NUM_TX_SPT_PAGES;
break;
case ATH12K_DP_RX_DESC:
+ cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
start = ATH12K_RX_SPT_PAGE_OFFSET;
end = start + ATH12K_NUM_RX_SPT_PAGES;
break;
@@ -1524,6 +1522,8 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
ATH12K_PAGE_SIZE,
@@ -1587,6 +1587,24 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return 0;
}
+static enum hal_rx_buf_return_buf_manager
+ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+{
+ switch (ab->device_id) {
+ case 0:
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ case 1:
+ return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+ case 2:
+ return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+ default:
+ ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
+ ab->device_id);
+ WARN_ON(1);
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ }
+}
+
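The new helper pins each device's idle link descriptors to that device's own WBM idle descriptor list. Its return value is cached as dp->idle_link_rbm in ath12k_dp_alloc() below and then threaded through the two ath12k_hal_set_link_desc_addr() call sites changed earlier in this patch, so every link descriptor is stamped with the return buffer manager of the device that owns it.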
int ath12k_dp_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1603,6 +1621,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
+ dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {