author     Zhengchao Shao <shaozhengchao@huawei.com>    2022-07-01 09:55:11 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>     2022-07-08 15:21:16 +0800
commit     02884a4f12de11f54d4ca67a07dd1f111d96fdbd (patch)
tree       eed31f5a4f14b9cec97b1eb98d325a606ad8be79
parent     32c0f7d4194c4352f902f07d2ab990994800596e (diff)
crypto: hisilicon/sec - don't sleep when in softirq
When the kunpeng920 encryption driver is used to encrypt and decrypt packets in softirq context, it is not allowed to take a mutex lock, because a mutex may sleep. The kernel reports the following error:

BUG: scheduling while atomic: swapper/57/0/0x00000300
Call trace:
 dump_backtrace+0x0/0x1e4
 show_stack+0x20/0x2c
 dump_stack+0xd8/0x140
 __schedule_bug+0x68/0x80
 __schedule+0x728/0x840
 schedule+0x50/0xe0
 schedule_preempt_disabled+0x18/0x24
 __mutex_lock.constprop.0+0x594/0x5dc
 __mutex_lock_slowpath+0x1c/0x30
 mutex_lock+0x50/0x60
 sec_request_init+0x8c/0x1a0 [hisi_sec2]
 sec_process+0x28/0x1ac [hisi_sec2]
 sec_skcipher_crypto+0xf4/0x1d4 [hisi_sec2]
 sec_skcipher_encrypt+0x1c/0x30 [hisi_sec2]
 crypto_skcipher_encrypt+0x2c/0x40
 crypto_authenc_encrypt+0xc8/0xfc [authenc]
 crypto_aead_encrypt+0x2c/0x40
 echainiv_encrypt+0x144/0x1a0 [echainiv]
 crypto_aead_encrypt+0x2c/0x40
 esp_output_tail+0x348/0x5c0 [esp4]
 esp_output+0x120/0x19c [esp4]
 xfrm_output_one+0x25c/0x4d4
 xfrm_output_resume+0x6c/0x1fc
 xfrm_output+0xac/0x3c0
 xfrm4_output+0x64/0x130
 ip_build_and_send_pkt+0x158/0x20c
 tcp_v4_send_synack+0xdc/0x1f0
 tcp_conn_request+0x7d0/0x994
 tcp_v4_conn_request+0x58/0x6c
 tcp_v6_conn_request+0xf0/0x100
 tcp_rcv_state_process+0x1cc/0xd60
 tcp_v4_do_rcv+0x10c/0x250
 tcp_v4_rcv+0xfc4/0x10a4
 ip_protocol_deliver_rcu+0xf4/0x200
 ip_local_deliver_finish+0x58/0x70
 ip_local_deliver+0x68/0x120
 ip_sublist_rcv_finish+0x70/0x94
 ip_list_rcv_finish.constprop.0+0x17c/0x1d0
 ip_sublist_rcv+0x40/0xb0
 ip_list_rcv+0x140/0x1dc
 __netif_receive_skb_list_core+0x154/0x28c
 __netif_receive_skb_list+0x120/0x1a0
 netif_receive_skb_list_internal+0xe4/0x1f0
 napi_complete_done+0x70/0x1f0
 gro_cell_poll+0x9c/0xb0
 napi_poll+0xcc/0x264
 net_rx_action+0xd4/0x21c
 __do_softirq+0x130/0x358
 irq_exit+0x11c/0x13c
 __handle_domain_irq+0x88/0xf0
 gic_handle_irq+0x78/0x2c0
 el1_irq+0xb8/0x140
 arch_cpu_idle+0x18/0x40
 default_idle_call+0x5c/0x1c0
 cpuidle_idle_call+0x174/0x1b0
 do_idle+0xc8/0x160
 cpu_startup_entry+0x30/0x11c
 secondary_start_kernel+0x158/0x1e4
softirq: huh, entered softirq 3 NET_RX 0000000093774ee4 with preempt_count 00000100, exited with fffffe00?

Fix this by converting req_lock from a mutex to a spinlock and taking it with the spin_lock_bh()/spin_unlock_bh() variants.

Fixes: 416d82204df4 ("crypto: hisilicon - add HiSilicon SEC V2 driver")
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
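For context, a minimal sketch of the locking pattern the patch moves to (illustrative only; the struct and function names below are not the driver's, they merely mirror the req_lock/req_idr pair in struct sec_qp_ctx): spin_lock_bh() never sleeps and disables softirq processing on the local CPU, so the same lock can be taken both from process context and from the NET_RX softirq path shown in the trace above.

#include <linux/spinlock.h>
#include <linux/idr.h>

/* Hypothetical example struct, mirroring the req_lock/req_idr pair. */
struct example_qp_ctx {
	spinlock_t req_lock;	/* was a struct mutex before the patch */
	struct idr req_idr;
};

static int example_alloc_req_id(struct example_qp_ctx *qp_ctx, int depth)
{
	int req_id;

	/* Safe in both process and softirq context: no sleeping here. */
	spin_lock_bh(&qp_ctx->req_lock);
	/* GFP_ATOMIC: the allocation must not sleep while the lock is held. */
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);

	return req_id;
}

A plain spin_lock() would not be sufficient: the lock is also taken on the process-context encrypt path (sec_bd_send() and friends), and a softirq interrupting that path on the same CPU could otherwise deadlock against the held lock, hence the _bh variants in the diff below.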
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h         |  2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  | 20
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 42bb486f3b6d..d2a0bc93e752 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -119,7 +119,7 @@ struct sec_qp_ctx {
struct idr req_idr;
struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
- struct mutex req_lock;
+ spinlock_t req_lock;
struct list_head backlog;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 6eebe739893c..71dfa7db6394 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -127,11 +127,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
int req_id;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
0, QM_Q_DEPTH, GFP_ATOMIC);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
dev_err(req->ctx->dev, "alloc req id fail!\n");
return req_id;
@@ -156,9 +156,9 @@ static void sec_free_req_id(struct sec_req *req)
qp_ctx->req_list[req_id] = NULL;
req->qp_ctx = NULL;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
@@ -273,7 +273,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
!(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
if (ctx->fake_req_limit <=
@@ -281,10 +281,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
list_add_tail(&req->backlog_head, &qp_ctx->backlog);
atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
return -EBUSY;
}
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(ret == -EBUSY))
return -ENOBUFS;
@@ -487,7 +487,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp->req_cb = sec_req_cb;
- mutex_init(&qp_ctx->req_lock);
+ spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
INIT_LIST_HEAD(&qp_ctx->backlog);
@@ -1382,7 +1382,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
{
struct sec_req *backlog_req = NULL;
- mutex_lock(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->req_lock);
if (ctx->fake_req_limit >=
atomic_read(&qp_ctx->qp->qp_status.used) &&
!list_empty(&qp_ctx->backlog)) {
@@ -1390,7 +1390,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
typeof(*backlog_req), backlog_head);
list_del(&backlog_req->backlog_head);
}
- mutex_unlock(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->req_lock);
return backlog_req;
}