From b363700ef50e2c0b3122b47ffba70281f45cb686 Mon Sep 17 00:00:00 2001 From: Quentin Lambert Date: Fri, 22 Jul 2016 15:32:42 +0200 Subject: crypto: ixp4xx - Fix a "simple if" coding style warning Signed-off-by: Quentin Lambert Signed-off-by: Herbert Xu --- drivers/crypto/ixp4xx_crypto.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 2296934455fc..a6ab2575df28 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -447,9 +447,8 @@ static int init_ixp_crypto(struct device *dev) if (!npe_running(npe_c)) { ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); - if (ret) { + if (ret) return ret; - } if (npe_recv_message(npe_c, msg, "STATUS_MSG")) goto npe_error; } else { -- cgit v1.2.3-58-ga151 From c5736a4067ad4c59d1447762172def7f27fe869a Mon Sep 17 00:00:00 2001 From: Quentin Lambert Date: Fri, 22 Jul 2016 15:32:41 +0200 Subject: crypto: ixp4xx - Add missing npe_c release in error branches Most error branches following the call to npe_request contain a call to npe_release. This patch adds a call to npe_release to the error branches following the call to npe_request that lack one. This issue was found with Hector. Signed-off-by: Quentin Lambert Signed-off-by: Herbert Xu --- drivers/crypto/ixp4xx_crypto.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index a6ab2575df28..7868765a70c5 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -448,7 +448,7 @@ static int init_ixp_crypto(struct device *dev) if (!npe_running(npe_c)) { ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); if (ret) - return ret; + goto npe_release; if (npe_recv_message(npe_c, msg, "STATUS_MSG")) goto npe_error; } else { @@ -472,7 +472,8 @@ static int init_ixp_crypto(struct device *dev) default: printk(KERN_ERR "Firmware of %s lacks crypto support\n", npe_name(npe_c)); - return -ENODEV; + ret = -ENODEV; + goto npe_release; } /* buffer_pool will also be used to sometimes store the hmac, * so assure it is large enough @@ -511,6 +512,7 @@ npe_error: err: dma_pool_destroy(ctx_pool); dma_pool_destroy(buffer_pool); +npe_release: npe_release(npe_c); return ret; } -- cgit v1.2.3-58-ga151 From fa242e80c7fb581eddbe636186020786f2e117da Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 18:09:46 -0500 Subject: crypto: ccp - Fix non-conforming comment style Adhere to the cryptodev comment convention. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.h | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index bd41ffceff82..5fbee638c9ba 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -253,16 +253,14 @@ struct ccp_device { struct device *dev; - /* - * Bus specific device information + /* Bus specific device information */ void *dev_specific; int (*get_irq)(struct ccp_device *ccp); void (*free_irq)(struct ccp_device *ccp); unsigned int irq; - /* - * I/O area used for device communication. The register mapping + /* I/O area used for device communication. The register mapping * starts at an offset into the mapped bar.
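* (A note on the transformation applied throughout this hunk: the
* cryptodev tree follows the networking comment convention, where the
* text begins on the opening comment line rather than on a bare second
* line. The patch reshapes each block comment accordingly; the wording
* itself is unchanged.)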
* The CMD_REQx registers and the Delete_Cmd_Queue_Job register * need to be protected while a command queue thread is accessing @@ -272,8 +270,7 @@ struct ccp_device { void __iomem *io_map; void __iomem *io_regs; - /* - * Master lists that all cmds are queued on. Because there can be + /* Master lists that all cmds are queued on. Because there can be * more than one CCP command queue that can process a cmd a separate * backlog list is neeeded so that the backlog completion call * completes before the cmd is available for execution. @@ -283,34 +280,29 @@ struct ccp_device { struct list_head cmd; struct list_head backlog; - /* - * The command queues. These represent the queues available on the + /* The command queues. These represent the queues available on the * CCP that are available for processing cmds */ struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; unsigned int cmd_q_count; - /* - * Support for the CCP True RNG + /* Support for the CCP True RNG */ struct hwrng hwrng; unsigned int hwrng_retries; - /* - * Support for the CCP DMA capabilities + /* Support for the CCP DMA capabilities */ struct dma_device dma_dev; struct ccp_dma_chan *ccp_dma_chan; struct kmem_cache *dma_cmd_cache; struct kmem_cache *dma_desc_cache; - /* - * A counter used to generate job-ids for cmds submitted to the CCP + /* A counter used to generate job-ids for cmds submitted to the CCP */ atomic_t current_id ____cacheline_aligned; - /* - * The CCP uses key storage blocks (KSB) to maintain context for certain + /* The CCP uses key storage blocks (KSB) to maintain context for certain * operations. To prevent multiple cmds from using the same KSB range * a command queue reserves a KSB range for the duration of the cmd. * Each queue, will however, reserve 2 KSB blocks for operations that -- cgit v1.2.3-58-ga151 From fba8855cb2403707b0639bdff0d34149699f14a2 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:09:20 -0500 Subject: crypto: ccp - Abstract PCI info for the CCP Device-specific values for the BAR and offset should be found in the version data structure. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 3 +++ drivers/crypto/ccp/ccp-dev.h | 3 +++ drivers/crypto/ccp/ccp-pci.c | 15 +++++++-------- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index d7a710347967..2f7f3c51a6c8 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -4,6 +4,7 @@ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky + * Author: Gary R Hook * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -541,4 +542,6 @@ static const struct ccp_actions ccp3_actions = { struct ccp_vdata ccpv3 = { .version = CCP_VERSION(3, 0), .perform = &ccp3_actions, + .bar = 2, + .offset = 0x20000, }; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5fbee638c9ba..8824e41677c6 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -4,6 +4,7 @@ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky + * Author: Gary R Hook * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -163,6 +164,8 @@ struct ccp_actions { struct ccp_vdata { unsigned int version; const struct ccp_actions *perform; + const unsigned int bar; + const unsigned int offset; }; extern struct ccp_vdata ccpv3; diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 0bf262e36b6b..072bcedef386 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -4,6 +4,7 @@ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky + * Author: Gary R Hook * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -25,9 +26,6 @@ #include "ccp-dev.h" -#define IO_BAR 2 -#define IO_OFFSET 0x20000 - #define MSIX_VECTORS 2 struct ccp_msix { @@ -156,10 +154,11 @@ static int ccp_find_mmio_area(struct ccp_device *ccp) resource_size_t io_len; unsigned long io_flags; - io_flags = pci_resource_flags(pdev, IO_BAR); - io_len = pci_resource_len(pdev, IO_BAR); - if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800))) - return IO_BAR; + io_flags = pci_resource_flags(pdev, ccp->vdata->bar); + io_len = pci_resource_len(pdev, ccp->vdata->bar); + if ((io_flags & IORESOURCE_MEM) && + (io_len >= (ccp->vdata->offset + 0x800))) + return ccp->vdata->bar; return -EIO; } @@ -216,7 +215,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(dev, "pci_iomap failed\n"); goto e_device; } - ccp->io_regs = ccp->io_map + IO_OFFSET; + ccp->io_regs = ccp->io_map + ccp->vdata->offset; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { -- cgit v1.2.3-58-ga151 From a43eb98507574acfc435c38a6b7fb1fab6605519 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:09:31 -0500 Subject: crypto: ccp - Shorten the fields of the action structure Use more concise field names; "perform_" is too verbose. 
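As a sketch of the effect at one call site (both lines appear verbatim in the ccp-ops.c hunks below):

	/* before: the table is already named "actions", so the prefix is redundant */
	ret = cmd_q->ccp->vdata->perform->perform_aes(&op);

	/* after: the shorter field reads cleanly through the vdata indirection */
	ret = cmd_q->ccp->vdata->perform->aes(&op);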
Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 12 ++++++------ drivers/crypto/ccp/ccp-dev.h | 12 ++++++------ drivers/crypto/ccp/ccp-ops.c | 21 +++++++++++---------- 3 files changed, 23 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 2f7f3c51a6c8..1a94d2ea4ff1 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -528,12 +528,12 @@ static irqreturn_t ccp_irq_handler(int irq, void *data) } static const struct ccp_actions ccp3_actions = { - .perform_aes = ccp_perform_aes, - .perform_xts_aes = ccp_perform_xts_aes, - .perform_sha = ccp_perform_sha, - .perform_rsa = ccp_perform_rsa, - .perform_passthru = ccp_perform_passthru, - .perform_ecc = ccp_perform_ecc, + .aes = ccp_perform_aes, + .xts_aes = ccp_perform_xts_aes, + .sha = ccp_perform_sha, + .rsa = ccp_perform_rsa, + .passthru = ccp_perform_passthru, + .ecc = ccp_perform_ecc, .init = ccp_init, .destroy = ccp_destroy, .irqhandler = ccp_irq_handler, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 8824e41677c6..46d3ef30c6e9 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -149,12 +149,12 @@ struct ccp_op; /* Structure for computation functions that are device-specific */ struct ccp_actions { - int (*perform_aes)(struct ccp_op *); - int (*perform_xts_aes)(struct ccp_op *); - int (*perform_sha)(struct ccp_op *); - int (*perform_rsa)(struct ccp_op *); - int (*perform_passthru)(struct ccp_op *); - int (*perform_ecc)(struct ccp_op *); + int (*aes)(struct ccp_op *); + int (*xts_aes)(struct ccp_op *); + int (*sha)(struct ccp_op *); + int (*rsa)(struct ccp_op *); + int (*passthru)(struct ccp_op *); + int (*ecc)(struct ccp_op *); int (*init)(struct ccp_device *); void (*destroy)(struct ccp_device *); irqreturn_t (*irqhandler)(int, void *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index ffa2891035ac..d1024771e926 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -4,6 +4,7 @@ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky + * Author: Gary R Hook * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -478,7 +479,7 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, op.u.passthru.byte_swap = byte_swap; - return cmd_q->ccp->vdata->perform->perform_passthru(&op); + return cmd_q->ccp->vdata->perform->passthru(&op); } static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q, @@ -610,7 +611,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, } } - ret = cmd_q->ccp->vdata->perform->perform_aes(&op); + ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; @@ -772,7 +773,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.soc = 1; } - ret = cmd_q->ccp->vdata->perform->perform_aes(&op); + ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -937,7 +938,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, if (!src.sg_wa.bytes_left) op.eom = 1; - ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op); + ret = cmd_q->ccp->vdata->perform->xts_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1094,7 +1095,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sha->final && !src.sg_wa.bytes_left) op.eom = 1; - ret = cmd_q->ccp->vdata->perform->perform_sha(&op); + ret = cmd_q->ccp->vdata->perform->sha(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; @@ -1274,7 +1275,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.u.rsa.mod_size = rsa->key_size; op.u.rsa.input_len = i_len; - ret = cmd_q->ccp->vdata->perform->perform_rsa(&op); + ret = cmd_q->ccp->vdata->perform->rsa(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1399,7 +1400,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, op.dst.u.dma.offset = dst.sg_wa.sg_used; op.dst.u.dma.length = op.src.u.dma.length; - ret = cmd_q->ccp->vdata->perform->perform_passthru(&op); + ret = cmd_q->ccp->vdata->perform->passthru(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1484,7 +1485,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, op.dst.u.dma.offset = 0; op.dst.u.dma.length = pt->src_len; - ret = cmd_q->ccp->vdata->perform->perform_passthru(&op); + ret = cmd_q->ccp->vdata->perform->passthru(&op); if (ret) cmd->engine_error = cmd_q->cmd_error; @@ -1575,7 +1576,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.u.ecc.function = cmd->u.ecc.function; - ret = cmd_q->ccp->vdata->perform->perform_ecc(&op); + ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; @@ -1739,7 +1740,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) op.u.ecc.function = cmd->u.ecc.function; - ret = cmd_q->ccp->vdata->perform->perform_ecc(&op); + ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; -- cgit v1.2.3-58-ga151 From 956ee21a6df08afd9c1c64e0f394a9a1b65e897d Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:09:40 -0500 Subject: crypto: ccp - Refactoring: symbol cleanup Form and use of the local storage block in the CCP is particular to the device version. 
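On a v3 part that storage is the key storage block (KSB); the v5 parts introduced later in this series manage a Local Storage Block (LSB) quite differently. The KSB-specific prefix therefore gives way to a neutral "sb" prefix; a representative pair from the hunks below:

	op.ksb_key = cmd_q->ksb_key;	/* old, v3-specific naming */
	op.sb_key = cmd_q->sb_key;	/* new, version-neutral naming */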
Much of the code that accesses the storage block can treat it as a virtual resource, and will undergo some renaming. Device-specific access to the memory will be moved into the device file. Service functions will be added to the actions structure. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 32 ++--- drivers/crypto/ccp/ccp-dev.c | 7 +- drivers/crypto/ccp/ccp-dev.h | 43 +++---- drivers/crypto/ccp/ccp-ops.c | 266 ++++++++++++++++++++-------------------- 4 files changed, 175 insertions(+), 173 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 1a94d2ea4ff1..19eafb85708f 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -100,10 +100,10 @@ static int ccp_perform_aes(struct ccp_op *op) | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) - | (op->ksb_key << REQ1_KEY_KSB_SHIFT); + | (op->sb_key << REQ1_KEY_KSB_SHIFT); cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); @@ -130,10 +130,10 @@ static int ccp_perform_xts_aes(struct ccp_op *op) cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) - | (op->ksb_key << REQ1_KEY_KSB_SHIFT); + | (op->sb_key << REQ1_KEY_KSB_SHIFT); cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); @@ -159,7 +159,7 @@ static int ccp_perform_sha(struct ccp_op *op) | REQ1_INIT; cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); @@ -182,11 +182,11 @@ static int ccp_perform_rsa(struct ccp_op *op) /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) - | (op->ksb_key << REQ1_KEY_KSB_SHIFT) + | (op->sb_key << REQ1_KEY_KSB_SHIFT) | REQ1_EOM; cr[1] = op->u.rsa.input_len - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); - cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT) + cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); @@ -216,10 +216,10 @@ static int ccp_perform_passthru(struct ccp_op *op) | ccp_addr_hi(&op->src.u.dma); if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) - cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT); + cr[3] |= (op->sb_key << REQ4_KSB_SHIFT); } else { - cr[2] = op->src.u.ksb * CCP_KSB_BYTES; - cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT); + cr[2] = op->src.u.sb * CCP_SB_BYTES; + cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT); } if (op->dst.type == CCP_MEMTYPE_SYSTEM) { cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); } else { - cr[4] = op->dst.u.ksb * CCP_KSB_BYTES; - cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT); + cr[4] = op->dst.u.sb * CCP_SB_BYTES; +
cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT); } if (op->eom) @@ -322,9 +322,9 @@ static int ccp_init(struct ccp_device *ccp) cmd_q->dma_pool = dma_pool; /* Reserve 2 KSB regions for the queue */ - cmd_q->ksb_key = KSB_START + ccp->ksb_start++; - cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++; - ccp->ksb_count -= 2; + cmd_q->sb_key = KSB_START + ccp->sb_start++; + cmd_q->sb_ctx = KSB_START + ccp->sb_start++; + ccp->sb_count -= 2; /* Preset some register values and masks that are queue * number dependent @@ -376,7 +376,7 @@ static int ccp_init(struct ccp_device *ccp) } /* Initialize the queues used to wait for KSB space and suspend */ - init_waitqueue_head(&ccp->ksb_queue); + init_waitqueue_head(&ccp->sb_queue); init_waitqueue_head(&ccp->suspend_queue); /* Create a kthread for each queue */ diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 87b9f2bfa623..9c8cfbb6841f 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -4,6 +4,7 @@ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky + * Author: Gary R Hook * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -397,9 +398,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) spin_lock_init(&ccp->cmd_lock); mutex_init(&ccp->req_mutex); - mutex_init(&ccp->ksb_mutex); - ccp->ksb_count = KSB_COUNT; - ccp->ksb_start = 0; + mutex_init(&ccp->sb_mutex); + ccp->sb_count = KSB_COUNT; + ccp->sb_start = 0; ccp->ord = ccp_increment_unit_ordinal(); snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 46d3ef30c6e9..1e30568d7c04 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -111,8 +111,7 @@ #define KSB_START 77 #define KSB_END 127 #define KSB_COUNT (KSB_END - KSB_START + 1) -#define CCP_KSB_BITS 256 -#define CCP_KSB_BYTES 32 +#define CCP_SB_BITS 256 #define CCP_JOBID_MASK 0x0000003f @@ -121,19 +120,19 @@ #define CCP_REVERSE_BUF_SIZE 64 -#define CCP_AES_KEY_KSB_COUNT 1 -#define CCP_AES_CTX_KSB_COUNT 1 +#define CCP_AES_KEY_SB_COUNT 1 +#define CCP_AES_CTX_SB_COUNT 1 -#define CCP_XTS_AES_KEY_KSB_COUNT 1 -#define CCP_XTS_AES_CTX_KSB_COUNT 1 +#define CCP_XTS_AES_KEY_SB_COUNT 1 +#define CCP_XTS_AES_CTX_SB_COUNT 1 -#define CCP_SHA_KSB_COUNT 1 +#define CCP_SHA_SB_COUNT 1 #define CCP_RSA_MAX_WIDTH 4096 #define CCP_PASSTHRU_BLOCKSIZE 256 #define CCP_PASSTHRU_MASKSIZE 32 -#define CCP_PASSTHRU_KSB_COUNT 1 +#define CCP_PASSTHRU_SB_COUNT 1 #define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */ #define CCP_ECC_MAX_OPERANDS 6 @@ -145,6 +144,8 @@ #define CCP_ECC_RESULT_OFFSET 60 #define CCP_ECC_RESULT_SUCCESS 0x0001 +#define CCP_SB_BYTES 32 + struct ccp_op; /* Structure for computation functions that are device-specific */ @@ -215,9 +216,9 @@ struct ccp_cmd_queue { /* Queue dma pool */ struct dma_pool *dma_pool; - /* Queue reserved KSB regions */ - u32 ksb_key; - u32 ksb_ctx; + /* Per-queue reserved storage block(s) */ + u32 sb_key; + u32 sb_ctx; /* Queue processing thread */ struct task_struct *kthread; @@ -313,12 +314,12 @@ struct ccp_device { * to avoid allocation contention. This will reserve at most 10 KSB * entries, leaving 40 KSB entries available for dynamic allocation. 
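* (The arithmetic behind those figures, from the defines earlier in this
* header: KSB_START 77 through KSB_END 127 gives KSB_COUNT = 51 slots in
* all, and the two slots reserved by each of the five hardware queues
* account for the "at most 10" figure.)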
*/ - struct mutex ksb_mutex ____cacheline_aligned; - DECLARE_BITMAP(ksb, KSB_COUNT); - wait_queue_head_t ksb_queue; - unsigned int ksb_avail; - unsigned int ksb_count; - u32 ksb_start; + struct mutex sb_mutex ____cacheline_aligned; + DECLARE_BITMAP(sb, KSB_COUNT); + wait_queue_head_t sb_queue; + unsigned int sb_avail; + unsigned int sb_count; + u32 sb_start; /* Suspend support */ unsigned int suspending; @@ -330,7 +331,7 @@ struct ccp_device { enum ccp_memtype { CCP_MEMTYPE_SYSTEM = 0, - CCP_MEMTYPE_KSB, + CCP_MEMTYPE_SB, CCP_MEMTYPE_LOCAL, CCP_MEMTYPE__LAST, }; @@ -374,7 +375,7 @@ struct ccp_mem { enum ccp_memtype type; union { struct ccp_dma_info dma; - u32 ksb; + u32 sb; } u; }; @@ -414,8 +415,8 @@ struct ccp_op { u32 jobid; u32 ioc; u32 soc; - u32 ksb_key; - u32 ksb_ctx; + u32 sb_key; + u32 sb_ctx; u32 init; u32 eom; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index d1024771e926..2c2890a4c2e2 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -46,25 +46,25 @@ static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count) int start; for (;;) { - mutex_lock(&ccp->ksb_mutex); + mutex_lock(&ccp->sb_mutex); - start = (u32)bitmap_find_next_zero_area(ccp->ksb, - ccp->ksb_count, - ccp->ksb_start, + start = (u32)bitmap_find_next_zero_area(ccp->sb, + ccp->sb_count, + ccp->sb_start, count, 0); - if (start <= ccp->ksb_count) { - bitmap_set(ccp->ksb, start, count); + if (start <= ccp->sb_count) { + bitmap_set(ccp->sb, start, count); - mutex_unlock(&ccp->ksb_mutex); + mutex_unlock(&ccp->sb_mutex); break; } - ccp->ksb_avail = 0; + ccp->sb_avail = 0; - mutex_unlock(&ccp->ksb_mutex); + mutex_unlock(&ccp->sb_mutex); /* Wait for KSB entries to become available */ - if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail)) + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) return 0; } @@ -77,15 +77,15 @@ static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start, if (!start) return; - mutex_lock(&ccp->ksb_mutex); + mutex_lock(&ccp->sb_mutex); - bitmap_clear(ccp->ksb, start - KSB_START, count); + bitmap_clear(ccp->sb, start - KSB_START, count); - ccp->ksb_avail = 1; + ccp->sb_avail = 1; - mutex_unlock(&ccp->ksb_mutex); + mutex_unlock(&ccp->sb_mutex); - wake_up_interruptible_all(&ccp->ksb_queue); + wake_up_interruptible_all(&ccp->sb_queue); } static u32 ccp_gen_jobid(struct ccp_device *ccp) @@ -232,7 +232,7 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, unsigned int len, unsigned int se_len, bool sign_extend) { - unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; + unsigned int nbytes, sg_offset, dm_offset, sb_len, i; u8 buffer[CCP_REVERSE_BUF_SIZE]; if (WARN_ON(se_len > sizeof(buffer))) @@ -242,21 +242,21 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, dm_offset = 0; nbytes = len; while (nbytes) { - ksb_len = min_t(unsigned int, nbytes, se_len); - sg_offset -= ksb_len; + sb_len = min_t(unsigned int, nbytes, se_len); + sg_offset -= sb_len; - scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0); - for (i = 0; i < ksb_len; i++) - wa->address[dm_offset + i] = buffer[ksb_len - i - 1]; + scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0); + for (i = 0; i < sb_len; i++) + wa->address[dm_offset + i] = buffer[sb_len - i - 1]; - dm_offset += ksb_len; - nbytes -= ksb_len; + dm_offset += sb_len; + nbytes -= sb_len; - if ((ksb_len != se_len) && sign_extend) { + if ((sb_len != se_len) && sign_extend) { /* Must sign-extend to nearest sign-extend length */ if 
(wa->address[dm_offset - 1] & 0x80) memset(wa->address + dm_offset, 0xff, - se_len - ksb_len); + se_len - sb_len); } } @@ -267,22 +267,22 @@ static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, struct scatterlist *sg, unsigned int len) { - unsigned int nbytes, sg_offset, dm_offset, ksb_len, i; + unsigned int nbytes, sg_offset, dm_offset, sb_len, i; u8 buffer[CCP_REVERSE_BUF_SIZE]; sg_offset = 0; dm_offset = len; nbytes = len; while (nbytes) { - ksb_len = min_t(unsigned int, nbytes, sizeof(buffer)); - dm_offset -= ksb_len; + sb_len = min_t(unsigned int, nbytes, sizeof(buffer)); + dm_offset -= sb_len; - for (i = 0; i < ksb_len; i++) - buffer[ksb_len - i - 1] = wa->address[dm_offset + i]; - scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1); + for (i = 0; i < sb_len; i++) + buffer[sb_len - i - 1] = wa->address[dm_offset + i]; + scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1); - sg_offset += ksb_len; - nbytes -= ksb_len; + sg_offset += sb_len; + nbytes -= sb_len; } } @@ -450,9 +450,9 @@ static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, } } -static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, - struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, - u32 byte_swap, bool from) +static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, + struct ccp_dm_workarea *wa, u32 jobid, u32 sb, + u32 byte_swap, bool from) { struct ccp_op op; @@ -464,8 +464,8 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, if (from) { op.soc = 1; - op.src.type = CCP_MEMTYPE_KSB; - op.src.u.ksb = ksb; + op.src.type = CCP_MEMTYPE_SB; + op.src.u.sb = sb; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = wa->dma.address; op.dst.u.dma.length = wa->length; @@ -473,8 +473,8 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = wa->dma.address; op.src.u.dma.length = wa->length; - op.dst.type = CCP_MEMTYPE_KSB; - op.dst.u.ksb = ksb; + op.dst.type = CCP_MEMTYPE_SB; + op.dst.u.sb = sb; } op.u.passthru.byte_swap = byte_swap; @@ -482,18 +482,18 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q, return cmd_q->ccp->vdata->perform->passthru(&op); } -static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q, - struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, - u32 byte_swap) +static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, + struct ccp_dm_workarea *wa, u32 jobid, u32 sb, + u32 byte_swap) { - return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false); + return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); } -static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q, - struct ccp_dm_workarea *wa, u32 jobid, u32 ksb, - u32 byte_swap) +static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, + struct ccp_dm_workarea *wa, u32 jobid, u32 sb, + u32 byte_swap) { - return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true); + return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); } static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, @@ -528,54 +528,54 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, return -EINVAL; } - BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); - BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); + BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.ksb_key = cmd_q->ksb_key; - op.ksb_ctx = cmd_q->ksb_ctx; + op.sb_key = cmd_q->sb_key; + op.sb_ctx = cmd_q->sb_ctx; op.init = 1; 
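	/* Aside on the two sb fields just set: ccp_init() reserves a pair of
	 * storage-block slots for every command queue (cmd_q->sb_key and
	 * cmd_q->sb_ctx, handed out from KSB_START in the v3 init path), so
	 * single-entry users such as this AES key/context setup never touch
	 * the dynamic storage-block allocator.
	 */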
op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; - /* All supported key sizes fit in a single (32-byte) KSB entry + /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, + CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; - dm_offset = CCP_KSB_BYTES - aes->key_len; + dm_offset = CCP_SB_BYTES - aes->key_len; ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); - ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } - /* The AES context fits in a single (32-byte) KSB entry and + /* The AES context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, - CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, + CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; - dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); - ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; @@ -593,9 +593,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, op.eom = 1; /* Push the K1/K2 key to the CCP now */ - ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, - op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, + op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; @@ -603,8 +603,8 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, aes->cmac_key_len); - ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; @@ -623,15 +623,15 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ - ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } /* ...but we only need AES_BLOCK_SIZE bytes */ - dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); e_src: @@ -681,56 +681,56 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return -EINVAL; } - BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1); - BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); + BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.ksb_key = cmd_q->ksb_key; - op.ksb_ctx = cmd_q->ksb_ctx; + 
op.sb_key = cmd_q->sb_key; + op.sb_ctx = cmd_q->sb_ctx; op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; - /* All supported key sizes fit in a single (32-byte) KSB entry + /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, + CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; - dm_offset = CCP_KSB_BYTES - aes->key_len; + dm_offset = CCP_SB_BYTES - aes->key_len; ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); - ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } - /* The AES context fits in a single (32-byte) KSB entry and + /* The AES context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, - CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, + CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; if (aes->mode != CCP_AES_MODE_ECB) { /* Load the AES context - conver to LE */ - dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); - ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; @@ -786,15 +786,15 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ - ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ - dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); } @@ -858,53 +858,53 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, if (!xts->key || !xts->iv || !xts->src || !xts->dst) return -EINVAL; - BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1); - BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); + BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.ksb_key = cmd_q->ksb_key; - op.ksb_ctx = cmd_q->ksb_ctx; + op.sb_key = cmd_q->sb_key; + op.sb_ctx = cmd_q->sb_ctx; op.init = 1; op.u.xts.action = xts->action; op.u.xts.unit_size = xts->unit_size; - /* All supported key sizes fit in a single (32-byte) KSB entry + /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. 
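* (A worked example of the slot math, assuming the two AES-128 halves of
* an XTS key: dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128 = 32 - 16 = 16,
* so each half is staged into its own half of the 32-byte entry before
* the copy into the storage block.)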
*/ ret = ccp_init_dm_workarea(&key, cmd_q, - CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES, + CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; - dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128; + dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len); - ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } - /* The AES context fits in a single (32-byte) KSB entry and + /* The AES context fits in a single (32-byte) SB entry and * for XTS is already in little endian format so no byte swapping * is needed. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, - CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES, + CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); - ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_NOOP); + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; @@ -950,15 +950,15 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ - ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ - dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE; + dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); e_dst: @@ -1036,21 +1036,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!sha->src) return -EINVAL; - BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_SHA_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.ksb_ctx = cmd_q->ksb_ctx; + op.sb_ctx = cmd_q->sb_ctx; op.u.sha.type = sha->type; op.u.sha.msg_bits = sha->msg_bits; - /* The SHA context fits in a single (32-byte) KSB entry and + /* The SHA context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. 
*/ ret = ccp_init_dm_workarea(&ctx, cmd_q, - CCP_SHA_KSB_COUNT * CCP_KSB_BYTES, + CCP_SHA_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) return ret; @@ -1077,8 +1077,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); } - ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; @@ -1107,8 +1107,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) /* Retrieve the SHA context - convert from LE to BE using * 32-byte (256-bit) byteswapping to BE */ - ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, - CCP_PASSTHRU_BYTESWAP_256BIT); + ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; @@ -1191,7 +1191,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) struct ccp_dm_workarea exp, src; struct ccp_data dst; struct ccp_op op; - unsigned int ksb_count, i_len, o_len; + unsigned int sb_count, i_len, o_len; int ret; if (rsa->key_size > CCP_RSA_MAX_WIDTH) @@ -1209,16 +1209,16 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) o_len = ((rsa->key_size + 255) / 256) * 32; i_len = o_len * 2; - ksb_count = o_len / CCP_KSB_BYTES; + sb_count = o_len / CCP_SB_BYTES; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count); - if (!op.ksb_key) + op.sb_key = ccp_alloc_ksb(cmd_q->ccp, sb_count); + if (!op.sb_key) return -EIO; - /* The RSA exponent may span multiple (32-byte) KSB entries and must + /* The RSA exponent may span multiple (32-byte) SB entries and must * be in little endian format. 
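* (A worked sizing example, assuming a 2048-bit key: o_len =
* ((2048 + 255) / 256) * 32 = 256 bytes, i_len = 512 bytes, and
* sb_count = o_len / CCP_SB_BYTES = 8 storage-block entries.)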
Reverse copy each 32-byte chunk * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk) * and each byte within that chunk and do not perform any byte swap @@ -1226,14 +1226,14 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) - goto e_ksb; + goto e_sb; ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, - CCP_KSB_BYTES, false); + CCP_SB_BYTES, false); if (ret) goto e_exp; - ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key, - CCP_PASSTHRU_BYTESWAP_NOOP); + ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_exp; @@ -1248,12 +1248,12 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_exp; ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, - CCP_KSB_BYTES, false); + CCP_SB_BYTES, false); if (ret) goto e_src; src.address += o_len; /* Adjust the address for the copy operation */ ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, - CCP_KSB_BYTES, false); + CCP_SB_BYTES, false); if (ret) goto e_src; src.address -= o_len; /* Reset the address to original value */ @@ -1292,8 +1292,8 @@ e_src: e_exp: ccp_dm_free(&exp); -e_ksb: - ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count); +e_sb: + ccp_free_ksb(cmd_q->ccp, op.sb_key, sb_count); return ret; } @@ -1322,7 +1322,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, return -EINVAL; } - BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; @@ -1330,18 +1330,18 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ - op.ksb_key = cmd_q->ksb_key; + op.sb_key = cmd_q->sb_key; ret = ccp_init_dm_workarea(&mask, cmd_q, - CCP_PASSTHRU_KSB_COUNT * - CCP_KSB_BYTES, + CCP_PASSTHRU_SB_COUNT * + CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); - ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, - CCP_PASSTHRU_BYTESWAP_NOOP); + ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, + CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_mask; @@ -1449,7 +1449,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, return -EINVAL; } - BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1); + BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; @@ -1457,13 +1457,13 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ - op.ksb_key = cmd_q->ksb_key; + op.sb_key = cmd_q->sb_key; mask.length = pt->mask_len; mask.dma.address = pt->mask; mask.dma.length = pt->mask_len; - ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key, + ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; -- cgit v1.2.3-58-ga151 From 58a690b701efc32ffd49722dd7b887154eb5a205 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:09:50 -0500 Subject: crypto: ccp - Refactor the storage block allocation code Move the KSB access/management functions to the v3 device file, and add function pointers to the actions structure. At the operations layer all of the references to the storage block will be generic (virtual). 
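Concretely, callers stop invoking the shared allocator directly and reach it through new function pointers in the actions table; a sketch of the change at the RSA call site (both lines appear in the diff below):

	/* before: direct call into the allocator that lived in ccp-ops.c */
	op.sb_key = ccp_alloc_ksb(cmd_q->ccp, sb_count);

	/* after: the version-specific allocator behind the actions table */
	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);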
This is in preparation for a version 5 device, in which the private storage block is managed differently. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 52 +++++++++++++++++++++++++++++ drivers/crypto/ccp/ccp-dev.h | 74 ++++++++++++++++++++++++----------------- drivers/crypto/ccp/ccp-ops.c | 52 ++--------------------------- 3 files changed, 98 insertions(+), 80 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 19eafb85708f..5b0659933b2b 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -20,6 +20,56 @@ #include "ccp-dev.h" +static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count) +{ + int start; + struct ccp_device *ccp = cmd_q->ccp; + + for (;;) { + mutex_lock(&ccp->sb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->sb, + ccp->sb_count, + ccp->sb_start, + count, 0); + if (start <= ccp->sb_count) { + bitmap_set(ccp->sb, start, count); + + mutex_unlock(&ccp->sb_mutex); + break; + } + + ccp->sb_avail = 0; + + mutex_unlock(&ccp->sb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) + return 0; + } + + return KSB_START + start; +} + +static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start, + unsigned int count) +{ + struct ccp_device *ccp = cmd_q->ccp; + + if (!start) + return; + + mutex_lock(&ccp->sb_mutex); + + bitmap_clear(ccp->sb, start - KSB_START, count); + + ccp->sb_avail = 1; + + mutex_unlock(&ccp->sb_mutex); + + wake_up_interruptible_all(&ccp->sb_queue); +} + static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) { struct ccp_cmd_queue *cmd_q = op->cmd_q; @@ -534,6 +584,8 @@ static const struct ccp_actions ccp3_actions = { .rsa = ccp_perform_rsa, .passthru = ccp_perform_passthru, .ecc = ccp_perform_ecc, + .sballoc = ccp_alloc_ksb, + .sbfree = ccp_free_ksb, .init = ccp_init, .destroy = ccp_destroy, .irqhandler = ccp_irq_handler, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 1e30568d7c04..4e38a61fbe5d 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -147,30 +147,6 @@ #define CCP_SB_BYTES 32 struct ccp_op; - -/* Structure for computation functions that are device-specific */ -struct ccp_actions { - int (*aes)(struct ccp_op *); - int (*xts_aes)(struct ccp_op *); - int (*sha)(struct ccp_op *); - int (*rsa)(struct ccp_op *); - int (*passthru)(struct ccp_op *); - int (*ecc)(struct ccp_op *); - int (*init)(struct ccp_device *); - void (*destroy)(struct ccp_device *); - irqreturn_t (*irqhandler)(int, void *); -}; - -/* Structure to hold CCP version-specific values */ -struct ccp_vdata { - unsigned int version; - const struct ccp_actions *perform; - const unsigned int bar; - const unsigned int offset; -}; - -extern struct ccp_vdata ccpv3; - struct ccp_device; struct ccp_cmd; @@ -306,13 +282,22 @@ struct ccp_device { */ atomic_t current_id ____cacheline_aligned; - /* The CCP uses key storage blocks (KSB) to maintain context for certain - * operations. To prevent multiple cmds from using the same KSB range - * a command queue reserves a KSB range for the duration of the cmd. - * Each queue, will however, reserve 2 KSB blocks for operations that - * only require single KSB entries (eg. AES context/iv and key) in order - * to avoid allocation contention. This will reserve at most 10 KSB - * entries, leaving 40 KSB entries available for dynamic allocation. 
+ /* The v3 CCP uses key storage blocks (SB) to maintain context for + * certain operations. To prevent multiple cmds from using the same + * SB range a command queue reserves an SB range for the duration of + * the cmd. Each queue, will however, reserve 2 SB blocks for + * operations that only require single SB entries (eg. AES context/iv + * and key) in order to avoid allocation contention. This will reserve + * at most 10 SB entries, leaving 40 SB entries available for dynamic + * allocation. + * + * The v5 CCP Local Storage Block (LSB) is broken up into 8 + * memrory ranges, each of which can be enabled for access by one + * or more queues. Device initialization takes this into account, + * and attempts to assign one region for exclusive use by each + * available queue; the rest are then aggregated as "public" use. + * If there are fewer regions than queues, all regions are shared + * amongst all queues. */ struct mutex sb_mutex ____cacheline_aligned; DECLARE_BITMAP(sb, KSB_COUNT); @@ -461,4 +446,31 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); int ccp_dmaengine_register(struct ccp_device *ccp); void ccp_dmaengine_unregister(struct ccp_device *ccp); +/* Structure for computation functions that are device-specific */ +struct ccp_actions { + int (*aes)(struct ccp_op *); + int (*xts_aes)(struct ccp_op *); + int (*sha)(struct ccp_op *); + int (*rsa)(struct ccp_op *); + int (*passthru)(struct ccp_op *); + int (*ecc)(struct ccp_op *); + u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); + void (*sbfree)(struct ccp_cmd_queue *, unsigned int, + unsigned int); + int (*init)(struct ccp_device *); + void (*destroy)(struct ccp_device *); + irqreturn_t (*irqhandler)(int, void *); +}; + +/* Structure to hold CCP version-specific values */ +struct ccp_vdata { + unsigned int version; + int (*init)(struct ccp_device *); + const struct ccp_actions *perform; + const unsigned int bar; + const unsigned int offset; +}; + +extern struct ccp_vdata ccpv3; + #endif diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 2c2890a4c2e2..bd9eb1d4512a 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -41,53 +41,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; -static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count) -{ - int start; - - for (;;) { - mutex_lock(&ccp->sb_mutex); - - start = (u32)bitmap_find_next_zero_area(ccp->sb, - ccp->sb_count, - ccp->sb_start, - count, 0); - if (start <= ccp->sb_count) { - bitmap_set(ccp->sb, start, count); - - mutex_unlock(&ccp->sb_mutex); - break; - } - - ccp->sb_avail = 0; - - mutex_unlock(&ccp->sb_mutex); - - /* Wait for KSB entries to become available */ - if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) - return 0; - } - - return KSB_START + start; -} - -static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start, - unsigned int count) -{ - if (!start) - return; - - mutex_lock(&ccp->sb_mutex); - - bitmap_clear(ccp->sb, start - KSB_START, count); - - ccp->sb_avail = 1; - - mutex_unlock(&ccp->sb_mutex); - - wake_up_interruptible_all(&ccp->sb_queue); -} - static u32 ccp_gen_jobid(struct ccp_device *ccp) { return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; @@ -1214,7 +1167,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.sb_key = ccp_alloc_ksb(cmd_q->ccp, 
sb_count); + op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); + if (!op.sb_key) return -EIO; @@ -1293,7 +1247,7 @@ e_exp: ccp_dm_free(&exp); e_sb: - ccp_free_ksb(cmd_q->ccp, op.sb_key, sb_count); + cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); return ret; } -- cgit v1.2.3-58-ga151 From 8256e683113e659d9bf6bffdd227eeb1881ae9a7 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:10:02 -0500 Subject: crypto: ccp - Refactor code supporting the CCP's RNG Make the RNG support code common (where possible) in preparation for adding a v5 device. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 51 +++++++++-------------------------------- drivers/crypto/ccp/ccp-dev.c | 28 ++++++++++++++++++++++ drivers/crypto/ccp/ccp-dev.h | 1 + 3 files changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 5b0659933b2b..373ac4fa4a47 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -307,35 +307,6 @@ static int ccp_perform_ecc(struct ccp_op *op) return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } -static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) -{ - struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); - u32 trng_value; - int len = min_t(int, sizeof(trng_value), max); - - /* - * Locking is provided by the caller so we can update device - * hwrng-related fields safely - */ - trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); - if (!trng_value) { - /* Zero is returned if not data is available or if a - * bad-entropy error is present. Assume an error if - * we exceed TRNG_RETRIES reads of zero. - */ - if (ccp->hwrng_retries++ > TRNG_RETRIES) - return -EIO; - - return 0; - } - - /* Reset the counter and save the rng value */ - ccp->hwrng_retries = 0; - memcpy(data, &trng_value, len); - - return len; -} - static int ccp_init(struct ccp_device *ccp) { struct device *dev = ccp->dev; @@ -495,17 +466,6 @@ static void ccp_destroy(struct ccp_device *ccp) /* Remove this device from the list of available units first */ ccp_del_device(ccp); - /* Unregister the DMA engine */ - ccp_dmaengine_unregister(ccp); - - /* Unregister the RNG */ - hwrng_unregister(&ccp->hwrng); - - /* Stop the queue kthreads */ - for (i = 0; i < ccp->cmd_q_count; i++) - if (ccp->cmd_q[i].kthread) - kthread_stop(ccp->cmd_q[i].kthread); - /* Build queue interrupt mask (two interrupt masks per queue) */ qim = 0; for (i = 0; i < ccp->cmd_q_count; i++) { @@ -523,6 +483,17 @@ static void ccp_destroy(struct ccp_device *ccp) } iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + + /* Unregister the RNG */ + hwrng_unregister(&ccp->hwrng); + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + ccp->free_irq(ccp); for (i = 0; i < ccp->cmd_q_count; i++) diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 9c8cfbb6841f..6b44730ef9d6 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -409,6 +409,34 @@ struct ccp_device *ccp_alloc_struct(struct device *dev) return ccp; } +int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); + u32 trng_value; + int len = min_t(int, sizeof(trng_value), max); + + /* Locking is 
provided by the caller so we can update device + * hwrng-related fields safely + */ + trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); + if (!trng_value) { + /* Zero is returned if not data is available or if a + * bad-entropy error is present. Assume an error if + * we exceed TRNG_RETRIES reads of zero. + */ + if (ccp->hwrng_retries++ > TRNG_RETRIES) + return -EIO; + + return 0; + } + + /* Reset the counter and save the rng value */ + ccp->hwrng_retries = 0; + memcpy(data, &trng_value, len); + + return len; +} + #ifdef CONFIG_PM bool ccp_queues_suspended(struct ccp_device *ccp) { diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 4e38a61fbe5d..0c44c5e049f5 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -440,6 +440,7 @@ void ccp_del_device(struct ccp_device *ccp); struct ccp_device *ccp_alloc_struct(struct device *dev); bool ccp_queues_suspended(struct ccp_device *ccp); int ccp_cmd_queue_thread(void *data); +int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait); int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); -- cgit v1.2.3-58-ga151 From bb4e89b34d1bf46156b7e880a0f34205fb7ce2a5 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:10:13 -0500 Subject: crypto: ccp - Refactor code to enable checks for queue space. Available queue space is used to decide (by counting free slots) if we have to put a command on hold or if it can be sent to the engine immediately. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 8 +++++++- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-ops.c | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 373ac4fa4a47..02c8c95fdc2d 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -70,6 +70,11 @@ static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start, wake_up_interruptible_all(&ccp->sb_queue); } +static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q) +{ + return CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); +} + static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) { struct ccp_cmd_queue *cmd_q = op->cmd_q; @@ -357,7 +362,7 @@ static int ccp_init(struct ccp_device *ccp) cmd_q->int_ok = 1 << (i * 2); cmd_q->int_err = 1 << ((i * 2) + 1); - cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); + cmd_q->free_slots = ccp_get_free_slots(cmd_q); init_waitqueue_head(&cmd_q->int_queue); @@ -559,6 +564,7 @@ static const struct ccp_actions ccp3_actions = { .sbfree = ccp_free_ksb, .init = ccp_init, .destroy = ccp_destroy, + .get_free_slots = ccp_get_free_slots, .irqhandler = ccp_irq_handler, }; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 0c44c5e049f5..de907029c6ee 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -458,6 +458,7 @@ struct ccp_actions { u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); + unsigned int (*get_free_slots)(struct ccp_cmd_queue *); int (*init)(struct ccp_device *); void (*destroy)(struct ccp_device *); irqreturn_t (*irqhandler)(int, void *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index bd9eb1d4512a..fdab0ae4f7c9 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1765,7 +1765,7 @@ int 
ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) cmd->engine_error = 0; cmd_q->cmd_error = 0; cmd_q->int_rcvd = 0; - cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); + cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); switch (cmd->engine) { case CCP_ENGINE_AES: -- cgit v1.2.3-58-ga151 From 4b394a232df78414442778b02ca4a388d947d059 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:10:21 -0500 Subject: crypto: ccp - Let a v5 CCP provide the same function as v3 Enable equivalent function on a v5 CCP. Add support for a version 5 CCP which enables AES/XTS/SHA services. Also, more work on the data structures to virtualize functionality. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/ccp-crypto-sha.c | 18 +- drivers/crypto/ccp/ccp-dev-v3.c | 28 +- drivers/crypto/ccp/ccp-dev-v5.c | 961 ++++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/ccp-dev.h | 164 +++++- drivers/crypto/ccp/ccp-ops.c | 279 +++++++---- drivers/crypto/ccp/ccp-pci.c | 6 +- include/linux/ccp.h | 3 - 8 files changed, 1340 insertions(+), 120 deletions(-) create mode 100644 drivers/crypto/ccp/ccp-dev-v5.c (limited to 'drivers') diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index ee4d2741b3ab..346ceb8f17bd 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o ccp-objs := ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ + ccp-dev-v5.o \ ccp-platform.o \ ccp-dmaengine.o ccp-$(CONFIG_PCI) += ccp-pci.o diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 8f36af62fe95..84a652be4274 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -4,6 +4,7 @@ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. * * Author: Tom Lendacky + * Author: Gary R Hook * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, rctx->cmd.engine = CCP_ENGINE_SHA; rctx->cmd.u.sha.type = rctx->type; rctx->cmd.u.sha.ctx = &rctx->ctx_sg; - rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); + + switch (rctx->type) { + case CCP_SHA_TYPE_1: + rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_224: + rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_256: + rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE; + break; + default: + /* Should never get here */ + break; + } + rctx->cmd.u.sha.src = sg; rctx->cmd.u.sha.src_len = rctx->hash_cnt; rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? 
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index 02c8c95fdc2d..ff2d2a4de16a 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -405,6 +405,7 @@ static int ccp_init(struct ccp_device *ccp) init_waitqueue_head(&ccp->sb_queue); init_waitqueue_head(&ccp->suspend_queue); + dev_dbg(dev, "Starting threads...\n"); /* Create a kthread for each queue */ for (i = 0; i < ccp->cmd_q_count; i++) { struct task_struct *kthread; @@ -424,6 +425,13 @@ static int ccp_init(struct ccp_device *ccp) wake_up_process(kthread); } + dev_dbg(dev, "Enabling interrupts...\n"); + /* Enable interrupts */ + iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); + + dev_dbg(dev, "Registering device...\n"); + ccp_add_device(ccp); + /* Register the RNG */ ccp->hwrng.name = ccp->rngname; ccp->hwrng.read = ccp_trng_read; @@ -438,11 +446,6 @@ static int ccp_init(struct ccp_device *ccp) if (ret) goto e_hwrng; - ccp_add_device(ccp); - - /* Enable interrupts */ - iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); - return 0; e_hwrng: @@ -468,7 +471,13 @@ static void ccp_destroy(struct ccp_device *ccp) struct ccp_cmd *cmd; unsigned int qim, i; - /* Remove this device from the list of available units first */ + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + + /* Unregister the RNG */ + hwrng_unregister(&ccp->hwrng); + + /* Remove this device from the list of available units */ ccp_del_device(ccp); /* Build queue interrupt mask (two interrupt masks per queue) */ @@ -488,12 +497,6 @@ static void ccp_destroy(struct ccp_device *ccp) } iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); - /* Unregister the DMA engine */ - ccp_dmaengine_unregister(ccp); - - /* Unregister the RNG */ - hwrng_unregister(&ccp->hwrng); - /* Stop the queue kthreads */ for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].kthread) @@ -570,6 +573,7 @@ static const struct ccp_actions ccp3_actions = { struct ccp_vdata ccpv3 = { .version = CCP_VERSION(3, 0), + .setup = NULL, .perform = &ccp3_actions, .bar = 2, .offset = 0x20000, diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c new file mode 100644 index 000000000000..16dad9633754 --- /dev/null +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -0,0 +1,961 @@ +/* + * AMD Cryptographic Coprocessor (CCP) driver + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * + * Author: Gary R Hook + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-dev.h" + +static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, + 0, count, 0); + if (start < LSB_SIZE) { + bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* No joy; try to get an entry from the shared blocks */ + ccp = cmd_q->ccp; + for (;;) { + mutex_lock(&ccp->sb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, + count, 0); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + bitmap_set(ccp->lsbmap, start, count); + + mutex_unlock(&ccp->sb_mutex); + return start * LSB_ITEM_SIZE; + } + + ccp->sb_avail = 0; + + mutex_unlock(&ccp->sb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) + return 0; + } +} + +static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, + unsigned int count) +{ + int lsbno = start / LSB_SIZE; + + if (!start) + return; + + if (cmd_q->lsb == lsbno) { + /* An entry from the private LSB */ + bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->ccp; + + mutex_lock(&ccp->sb_mutex); + bitmap_clear(ccp->lsbmap, start, count); + ccp->sb_avail = 1; + mutex_unlock(&ccp->sb_mutex); + wake_up_interruptible_all(&ccp->sb_queue); + } +} + +/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + u16 size:7; + u16 encrypt:1; + u16 mode:5; + u16 type:2; + } aes; + struct { + u16 size:7; + u16 encrypt:1; + u16 rsvd:5; + u16 type:2; + } aes_xts; + struct { + u16 rsvd1:10; + u16 type:4; + u16 rsvd2:1; + } sha; + struct { + u16 mode:3; + u16 size:12; + } rsa; + struct { + u16 byteswap:2; + u16 bitwise:3; + u16 reflect:2; + u16 rsvd:8; + } pt; + struct { + u16 rsvd:13; + } zlib; + struct { + u16 size:10; + u16 type:2; + u16 mode:3; + } ecc; + u16 raw; +}; + +#define CCP_AES_SIZE(p) ((p)->aes.size) +#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt) +#define CCP_AES_MODE(p) ((p)->aes.mode) +#define CCP_AES_TYPE(p) ((p)->aes.type) +#define CCP_XTS_SIZE(p) ((p)->aes_xts.size) +#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) +#define CCP_SHA_TYPE(p) ((p)->sha.type) +#define CCP_RSA_SIZE(p) ((p)->rsa.size) +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) +#define CCP_ECC_MODE(p) ((p)->ecc.mode) +#define CCP_ECC_AFFINE(p) ((p)->ecc.one) + +/* Word 0 */ +#define CCP5_CMD_DW0(p) ((p)->dw0) +#define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) +#define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) +#define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) +#define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) +#define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) +#define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) +#define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP5_CMD_DW1(p) ((p)->length) +#define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) + +/* Word 2 */ +#define CCP5_CMD_DW2(p) ((p)->src_lo) +#define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) + +/* Word 3 */ +#define CCP5_CMD_DW3(p) ((p)->dw3) +#define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) +#define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) 
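+
+/* Illustrative sketch (editorial, not part of the patch): packing the
+ * function field with the union above.  CCP_AES_TYPE_128 and
+ * CCP_AES_MODE_CBC are the enums from include/linux/ccp.h.
+ *
+ *	struct ccp5_desc desc;
+ *	union ccp_function fn;
+ *
+ *	fn.raw = 0;
+ *	CCP_AES_TYPE(&fn) = CCP_AES_TYPE_128;
+ *	CCP_AES_MODE(&fn) = CCP_AES_MODE_CBC;
+ *	CCP_AES_ENCRYPT(&fn) = 1;
+ *	CCP5_CMD_FUNCTION(&desc) = fn.raw;
+ *
+ * ccp5_perform_aes() below does exactly this, taking the values from
+ * the ccp_op instead of hard-coding them.
+ */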
+ +/* Words 4/5 */ +#define CCP5_CMD_DW4(p) ((p)->dw4) +#define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) +#define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) +#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) +#define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) + +/* Word 6/7 */ +#define CCP5_CMD_DW6(p) ((p)->key_lo) +#define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) +#define CCP5_CMD_DW7(p) ((p)->dw7) +#define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +static inline u32 low_address(unsigned long addr) +{ + return (u64)addr & 0x0ffffffff; +} + +static inline u32 high_address(unsigned long addr) +{ + return ((u64)addr >> 32) & 0x00000ffff; +} + +static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) +{ + unsigned int head_idx, n; + u32 head_lo, queue_start; + + queue_start = low_address(cmd_q->qdma_tail); + head_lo = ioread32(cmd_q->reg_head_lo); + head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); + + n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + + return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ +} + +static int ccp5_do_cmd(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + u32 tail; + int i; + int ret = 0; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (CCP5_CMD_IOC(desc)) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* A version 5 device doesn't use Job IDs... 
*/ + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return 0; +} + +static int ccp5_perform_aes(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_key * LSB_ITEM_SIZE; + + /* Zero out all the fields of the command desc */ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_AES_ENCRYPT(&function) = op->u.aes.action; + CCP_AES_MODE(&function) = op->u.aes.mode; + CCP_AES_TYPE(&function) = op->u.aes.type; + if (op->u.aes.mode == CCP_AES_MODE_CFB) + CCP_AES_SIZE(&function) = 0x7f; + + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_xts_aes(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_key * LSB_ITEM_SIZE; + + /* Zero out all the fields of the command desc */ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_XTS_ENCRYPT(&function) = op->u.xts.action; + CCP_XTS_SIZE(&function) = op->u.xts.unit_size; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sha(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + /* Zero out all the fields of the command desc */ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = op->u.sha.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits); + CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits); + } else { + CCP5_CMD_SHA_LO(&desc) = 0; + CCP5_CMD_SHA_HI(&desc) = 0; 
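+		/* Note (inferred from this code, not from a documented
+		 * spec): the engine consumes the message bit-length only
+		 * on the final (EOM) pass, when it generates the SHA
+		 * padding; intermediate passes leave these fields zero.
+		 */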
+ } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_rsa(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + /* Zero out all the fields of the command desc */ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 0; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_RSA_SIZE(&function) = op->u.rsa.mod_size; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; + + /* Source is from external memory */ + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + /* Destination is in external memory */ + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + /* Key (Exponent) is in external memory */ + CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); + CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_passthru(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 0; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; + CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data is always 256 bytes */ + if (op->src.type == CCP_MEMTYPE_SYSTEM) + CCP5_CMD_LEN(&desc) = saddr->length; + else + CCP5_CMD_LEN(&desc) = daddr->length; + + if (op->src.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + CCP5_CMD_LSB_ID(&desc) = op->sb_key; + } else { + u32 key_addr = op->src.u.sb * CCP_SB_BYTES; + + CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_SRC_HI(&desc) = 0; + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; + } + + if (op->dst.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } else { + u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; + + CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_DST_HI(&desc) = 0; + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_ecc(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + /* Zero out all the fields of the command desc */ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 0; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + function.ecc.mode = op->u.ecc.function; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = 
ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) +{ + int q_mask = 1 << cmd_q->id; + int queues = 0; + int j; + + /* Build a bit mask to know which LSBs this queue has access to. + * Don't bother with segment 0 as it has special privileges. + */ + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + bitmap_set(cmd_q->lsbmask, j, 1); + status >>= LSB_REGION_WIDTH; + } + queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + dev_info(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", + cmd_q->id, queues); + + return queues ? 0 : -EINVAL; +} + + +static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int bitno; + int qlsb_wgt; + int i; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. + */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + + if (qlsb_wgt == lsb_cnt) { + bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); + + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + if (test_bit(bitno, lsb_pub)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + bitmap_clear(lsb_pub, bitno, 1); + dev_info(ccp->dev, + "Queue %d gets LSB %d\n", + i, bitno); + break; + } + bitmap_clear(qlsb, bitno, 1); + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared resources. + */ +static int ccp_assign_lsbs(struct ccp_device *ccp) +{ + DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + bitmap_zero(lsb_pub, MAX_LSB_CNT); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + bitmap_or(lsb_pub, + lsb_pub, ccp->cmd_q[i].lsbmask, + MAX_LSB_CNT); + + n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBS to give every queue a private LSB. + * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. 
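+	 * (Worked example, added for illustration: if queues A, B and C
+	 * can reach LSB sets {1}, {1,2} and {1,2,3}, the lsb_cnt==1 pass
+	 * assigns LSB 1 to A, the lsb_cnt==2 pass assigns LSB 2 to B, and
+	 * the lsb_cnt==3 pass assigns LSB 3 to C; anything still set in
+	 * lsb_pub afterwards becomes a shared resource.)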
+ */ + for (lsb_cnt = 1; + n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". + */ + bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); + + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); + bitmap_set(qlsb, bitno, 1); + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + } + + return rc; +} + +static int ccp5_init(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct dma_pool *dma_pool; + char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; + unsigned int qmr, qim, i; + u64 status; + u32 status_lo, status_hi; + int ret; + + /* Find available queues */ + qim = 0; + qmr = ioread32(ccp->io_regs + Q_MASK_REG); + for (i = 0; i < MAX_HW_QUEUES; i++) { + + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", + ccp->name, i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + mutex_init(&cmd_q->q_mutex); + + /* Page alignment satisfies our needs for N <= 128 */ + BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); + if (!cmd_q->qbase) { + dev_err(dev, "unable to allocate command queue\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q->qidx = 0; + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_control = ccp->io_regs + + CMD5_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + + CMD5_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + + CMD5_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + + CMD5_Q_INT_STATUS_BASE; + cmd_q->reg_dma_status = cmd_q->reg_control + + CMD5_Q_DMA_STATUS_BASE; + cmd_q->reg_dma_read_status = cmd_q->reg_control + + CMD5_Q_DMA_READ_STATUS_BASE; + cmd_q->reg_dma_write_status = cmd_q->reg_control + + CMD5_Q_DMA_WRITE_STATUS_BASE; + + init_waitqueue_head(&cmd_q->int_queue); + + dev_dbg(dev, "queue #%u available\n", i); + } + if (ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = -EIO; + goto e_pool; + } + dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); + + /* Turn off the queues and disable interrupts until ready */ + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol = 0; /* Start with nothing */ + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + /* Disable the interrupts */ + iowrite32(0x00, cmd_q->reg_int_enable); + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + + /* Clear the interrupts */ + iowrite32(ALL_INTERRUPTS, 
cmd_q->reg_interrupt_status); + } + + dev_dbg(dev, "Requesting an IRQ...\n"); + /* Request an irq */ + ret = ccp->get_irq(ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + + /* Initialize the queue used to suspend */ + init_waitqueue_head(&ccp->suspend_queue); + + dev_dbg(dev, "Loading LSB map...\n"); + /* Copy the private LSB mask to the public registers */ + status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); + iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); + status = ((u64)status_hi<<30) | (u64)status_lo; + + dev_dbg(dev, "Configuring virtual queues...\n"); + /* Configure size of each virtual queue accessible to host */ + for (i = 0; i < ccp->cmd_q_count; i++) { + u32 dma_addr_lo; + u32 dma_addr_hi; + + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); + cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + + cmd_q->qdma_tail = cmd_q->qbase_dma; + dma_addr_lo = low_address(cmd_q->qdma_tail); + iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); + iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); + + dma_addr_hi = high_address(cmd_q->qdma_tail); + cmd_q->qcontrol |= (dma_addr_hi << 16); + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + /* Find the LSB regions accessible to the queue */ + ccp_find_lsb_regions(cmd_q, status); + cmd_q->lsb = -1; /* Unassigned value */ + } + + dev_dbg(dev, "Assigning LSBs...\n"); + ret = ccp_assign_lsbs(ccp); + if (ret) { + dev_err(dev, "Unable to assign LSBs (%d)\n", ret); + goto e_irq; + } + + /* Optimization: pre-allocate LSB slots for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + } + + dev_dbg(dev, "Starting threads...\n"); + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + wake_up_process(kthread); + } + + dev_dbg(dev, "Enabling interrupts...\n"); + /* Enable interrupts */ + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable); + } + + dev_dbg(dev, "Registering device...\n"); + /* Put this on the unit list to make it available */ + ccp_add_device(ccp); + + return 0; + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + +e_irq: + ccp->free_irq(ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +static void ccp5_destroy(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int i; + + /* Remove this device from the list of available units first */ + ccp_del_device(ccp); + + /* Disable and clear interrupts */ + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + /* Turn off the run bit */ + iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); + + /* Disable the interrupts */ + iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); + + /* Clear 
the interrupt status */ + iowrite32(0x00, cmd_q->reg_int_enable); + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + ccp->free_irq(ccp); + + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, + cmd_q->qbase_dma); + } + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } + while (!list_empty(&ccp->backlog)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } +} + +static irqreturn_t ccp5_irq_handler(int irq, void *data) +{ + struct device *dev = data; + struct ccp_device *ccp = dev_get_drvdata(dev); + u32 status; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + status = ioread32(cmd_q->reg_interrupt_status); + + if (status) { + cmd_q->int_status = status; + cmd_q->q_status = ioread32(cmd_q->reg_status); + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); + + /* On error, only save the first error value */ + if ((status & INT_ERROR) && !cmd_q->cmd_error) + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + + cmd_q->int_rcvd = 1; + + /* Acknowledge the interrupt and wake the kthread */ + iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status); + wake_up_interruptible(&cmd_q->int_queue); + } + } + + return IRQ_HANDLED; +} + +static void ccp5_config(struct ccp_device *ccp) +{ + /* Public side */ + iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); +} + +static const struct ccp_actions ccp5_actions = { + .aes = ccp5_perform_aes, + .xts_aes = ccp5_perform_xts_aes, + .sha = ccp5_perform_sha, + .rsa = ccp5_perform_rsa, + .passthru = ccp5_perform_passthru, + .ecc = ccp5_perform_ecc, + .sballoc = ccp_lsb_alloc, + .sbfree = ccp_lsb_free, + .init = ccp5_init, + .destroy = ccp5_destroy, + .get_free_slots = ccp5_get_free_slots, + .irqhandler = ccp5_irq_handler, +}; + +struct ccp_vdata ccpv5 = { + .version = CCP_VERSION(5, 0), + .setup = ccp5_config, + .perform = &ccp5_actions, + .bar = 2, + .offset = 0x0, +}; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index de907029c6ee..5ff4a73e3bd4 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -61,7 +61,62 @@ #define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) #define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f) -/****** REQ0 Related Values ******/ +/* ------------------------ CCP Version 5 Specifics ------------------------ */ +#define CMD5_QUEUE_MASK_OFFSET 0x00 +#define CMD5_REQID_CONFIG_OFFSET 0x08 +#define LSB_PUBLIC_MASK_LO_OFFSET 0x18 +#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C +#define LSB_PRIVATE_MASK_LO_OFFSET 0x20 +#define LSB_PRIVATE_MASK_HI_OFFSET 0x24 + +#define CMD5_Q_CONTROL_BASE 0x0000 +#define CMD5_Q_TAIL_LO_BASE 0x0004 +#define CMD5_Q_HEAD_LO_BASE 0x0008 +#define CMD5_Q_INT_ENABLE_BASE 0x000C +#define CMD5_Q_INTERRUPT_STATUS_BASE 0x0010 + +#define CMD5_Q_STATUS_BASE 0x0100 +#define CMD5_Q_INT_STATUS_BASE 0x0104 +#define CMD5_Q_DMA_STATUS_BASE 0x0108 +#define CMD5_Q_DMA_READ_STATUS_BASE 0x010C +#define CMD5_Q_DMA_WRITE_STATUS_BASE 0x0110 +#define 
CMD5_Q_ABORT_BASE 0x0114 +#define CMD5_Q_AX_CACHE_BASE 0x0118 + +/* Address offset between two virtual queue registers */ +#define CMD5_Q_STATUS_INCR 0x1000 + +/* Bit masks */ +#define CMD5_Q_RUN 0x1 +#define CMD5_Q_HALT 0x2 +#define CMD5_Q_MEM_LOCATION 0x4 +#define CMD5_Q_SIZE 0x1F +#define CMD5_Q_SHIFT 3 +#define COMMANDS_PER_QUEUE 16 +#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) +#define Q_DESC_SIZE sizeof(struct ccp5_desc) +#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define INT_COMPLETION 0x1 +#define INT_ERROR 0x2 +#define INT_QUEUE_STOPPED 0x4 +#define ALL_INTERRUPTS (INT_COMPLETION| \ + INT_ERROR| \ + INT_QUEUE_STOPPED) + +#define LSB_REGION_WIDTH 5 +#define MAX_LSB_CNT 8 + +#define LSB_SIZE 16 +#define LSB_ITEM_SIZE 32 +#define PLSB_MAP_SIZE (LSB_SIZE) +#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE) + +#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE) + +/* ------------------------ CCP Version 3 Specifics ------------------------ */ #define REQ0_WAIT_FOR_WRITE 0x00000004 #define REQ0_INT_ON_COMPLETE 0x00000002 #define REQ0_STOP_ON_COMPLETE 0x00000001 @@ -115,6 +170,8 @@ #define CCP_JOBID_MASK 0x0000003f +/* ------------------------ General CCP Defines ------------------------ */ + #define CCP_DMAPOOL_MAX_SIZE 64 #define CCP_DMAPOOL_ALIGN BIT(5) @@ -149,6 +206,7 @@ struct ccp_op; struct ccp_device; struct ccp_cmd; +struct ccp_fns; struct ccp_dma_cmd { struct list_head entry; @@ -192,10 +250,30 @@ struct ccp_cmd_queue { /* Queue dma pool */ struct dma_pool *dma_pool; + /* Queue base address (not neccessarily aligned)*/ + struct ccp5_desc *qbase; + + /* Aligned queue start address (per requirement) */ + struct mutex q_mutex ____cacheline_aligned; + unsigned int qidx; + + /* Version 5 has different requirements for queue memory */ + unsigned int qsize; + dma_addr_t qbase_dma; + dma_addr_t qdma_tail; + /* Per-queue reserved storage block(s) */ u32 sb_key; u32 sb_ctx; + /* Bitmap of LSBs that can be accessed by this queue */ + DECLARE_BITMAP(lsbmask, MAX_LSB_CNT); + /* Private LSB that is assigned to this queue, or -1 if none. 
+ * Bitmap for my private LSB, unused otherwise + */ + unsigned int lsb; + DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE); + /* Queue processing thread */ struct task_struct *kthread; unsigned int active; @@ -209,8 +287,17 @@ struct ccp_cmd_queue { u32 int_err; /* Register addresses for queue */ + void __iomem *reg_control; + void __iomem *reg_tail_lo; + void __iomem *reg_head_lo; + void __iomem *reg_int_enable; + void __iomem *reg_interrupt_status; void __iomem *reg_status; void __iomem *reg_int_status; + void __iomem *reg_dma_status; + void __iomem *reg_dma_read_status; + void __iomem *reg_dma_write_status; + u32 qcontrol; /* Cached control register */ /* Status values from job */ u32 int_status; @@ -306,6 +393,9 @@ struct ccp_device { unsigned int sb_count; u32 sb_start; + /* Bitmap of shared LSBs, if any */ + DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE); + /* Suspend support */ unsigned int suspending; wait_queue_head_t suspend_queue; @@ -320,6 +410,7 @@ enum ccp_memtype { CCP_MEMTYPE_LOCAL, CCP_MEMTYPE__LAST, }; +#define CCP_MEMTYPE_LSB CCP_MEMTYPE_KSB struct ccp_dma_info { dma_addr_t address; @@ -407,6 +498,7 @@ struct ccp_op { struct ccp_mem src; struct ccp_mem dst; + struct ccp_mem exp; union { struct ccp_aes_op aes; @@ -416,6 +508,7 @@ struct ccp_op { struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; } u; + struct ccp_mem key; }; static inline u32 ccp_addr_lo(struct ccp_dma_info *info) @@ -428,6 +521,70 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info) return upper_32_bits(info->address + info->offset) & 0x0000ffff; } +/** + * descriptor for version 5 CPP commands + * 8 32-bit words: + * word 0: function; engine; control bits + * word 1: length of source data + * word 2: low 32 bits of source pointer + * word 3: upper 16 bits of source pointer; source memory type + * word 4: low 32 bits of destination pointer + * word 5: upper 16 bits of destination pointer; destination memory type + * word 6: low 32 bits of key pointer + * word 7: upper 16 bits of key pointer; key memory type + */ +struct dword0 { + __le32 soc:1; + __le32 ioc:1; + __le32 rsvd1:1; + __le32 init:1; + __le32 eom:1; /* AES/SHA only */ + __le32 function:15; + __le32 engine:4; + __le32 prot:1; + __le32 rsvd2:7; +}; + +struct dword3 { + __le32 src_hi:16; + __le32 src_mem:2; + __le32 lsb_cxt_id:8; + __le32 rsvd1:5; + __le32 fixed:1; +}; + +union dword4 { + __le32 dst_lo; /* NON-SHA */ + __le32 sha_len_lo; /* SHA */ +}; + +union dword5 { + struct { + __le32 dst_hi:16; + __le32 dst_mem:2; + __le32 rsvd1:13; + __le32 fixed:1; + } fields; + __le32 sha_len_hi; +}; + +struct dword7 { + __le32 key_hi:16; + __le32 key_mem:2; + __le32 rsvd1:14; +}; + +struct ccp5_desc { + struct dword0 dw0; + __le32 length; + __le32 src_lo; + struct dword3 dw3; + union dword4 dw4; + union dword5 dw5; + __le32 key_lo; + struct dword7 dw7; +}; + int ccp_pci_init(void); void ccp_pci_exit(void); @@ -466,13 +623,14 @@ struct ccp_actions { /* Structure to hold CCP version-specific values */ struct ccp_vdata { - unsigned int version; - int (*init)(struct ccp_device *); + const unsigned int version; + void (*setup)(struct ccp_device *); const struct ccp_actions *perform; const unsigned int bar; const unsigned int offset; }; extern struct ccp_vdata ccpv3; +extern struct ccp_vdata ccpv5; #endif diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index fdab0ae4f7c9..50fae4442801 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -21,26 +21,29 @@ #include "ccp-dev.h" /* SHA initial context values */ -static 
const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { +static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), - cpu_to_be32(SHA1_H4), 0, 0, 0, + cpu_to_be32(SHA1_H4), }; -static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { +static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), }; -static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { +static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; +#define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ + ccp_gen_jobid(ccp) : 0) + static u32 ccp_gen_jobid(struct ccp_device *ccp) { return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; @@ -487,7 +490,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; @@ -640,7 +643,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = (aes->mode == CCP_AES_MODE_ECB) ? 
0 : 1; @@ -679,7 +682,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_key; if (aes->mode != CCP_AES_MODE_ECB) { - /* Load the AES context - conver to LE */ + /* Load the AES context - convert to LE */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, @@ -817,7 +820,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; @@ -936,98 +939,154 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) struct ccp_dm_workarea ctx; struct ccp_data src; struct ccp_op op; + unsigned int ioffset, ooffset; + unsigned int digest_size; + int sb_count; + const void *init; + u64 block_size; + int ctx_size; int ret; - if (sha->ctx_len != CCP_SHA_CTXSIZE) + switch (sha->type) { + case CCP_SHA_TYPE_1: + if (sha->ctx_len < SHA1_DIGEST_SIZE) + return -EINVAL; + block_size = SHA1_BLOCK_SIZE; + break; + case CCP_SHA_TYPE_224: + if (sha->ctx_len < SHA224_DIGEST_SIZE) + return -EINVAL; + block_size = SHA224_BLOCK_SIZE; + break; + case CCP_SHA_TYPE_256: + if (sha->ctx_len < SHA256_DIGEST_SIZE) + return -EINVAL; + block_size = SHA256_BLOCK_SIZE; + break; + default: return -EINVAL; + } if (!sha->ctx) return -EINVAL; - if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1))) + if (!sha->final && (sha->src_len & (block_size - 1))) return -EINVAL; - if (!sha->src_len) { - const u8 *sha_zero; + /* The version 3 device can't handle zero-length input */ + if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { - /* Not final, just return */ - if (!sha->final) - return 0; + if (!sha->src_len) { + unsigned int digest_len; + const u8 *sha_zero; - /* CCP can't do a zero length sha operation so the caller - * must buffer the data. - */ - if (sha->msg_bits) - return -EINVAL; + /* Not final, just return */ + if (!sha->final) + return 0; - /* The CCP cannot perform zero-length sha operations so the - * caller is required to buffer data for the final operation. - * However, a sha operation for a message with a total length - * of zero is valid so known values are required to supply - * the result. - */ - switch (sha->type) { - case CCP_SHA_TYPE_1: - sha_zero = sha1_zero_message_hash; - break; - case CCP_SHA_TYPE_224: - sha_zero = sha224_zero_message_hash; - break; - case CCP_SHA_TYPE_256: - sha_zero = sha256_zero_message_hash; - break; - default: - return -EINVAL; - } + /* CCP can't do a zero length sha operation so the + * caller must buffer the data. + */ + if (sha->msg_bits) + return -EINVAL; - scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, - sha->ctx_len, 1); + /* The CCP cannot perform zero-length sha operations + * so the caller is required to buffer data for the + * final operation. However, a sha operation for a + * message with a total length of zero is valid so + * known values are required to supply the result. 
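+			 * (The known values are the digests of the empty
+			 * message -- e.g. SHA-1's
+			 * da39a3ee5e6b4b0d3255bfef95601890afd80709 --
+			 * supplied by the *_zero_message_hash constants
+			 * used below.)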
+ */ + switch (sha->type) { + case CCP_SHA_TYPE_1: + sha_zero = sha1_zero_message_hash; + digest_len = SHA1_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_224: + sha_zero = sha224_zero_message_hash; + digest_len = SHA224_DIGEST_SIZE; + break; + case CCP_SHA_TYPE_256: + sha_zero = sha256_zero_message_hash; + digest_len = SHA256_DIGEST_SIZE; + break; + default: + return -EINVAL; + } - return 0; + scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, + digest_len, 1); + + return 0; + } } - if (!sha->src) - return -EINVAL; + /* Set variables used throughout */ + switch (sha->type) { + case CCP_SHA_TYPE_1: + digest_size = SHA1_DIGEST_SIZE; + init = (void *) ccp_sha1_init; + ctx_size = SHA1_DIGEST_SIZE; + sb_count = 1; + if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) + ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + else + ooffset = ioffset = 0; + break; + case CCP_SHA_TYPE_224: + digest_size = SHA224_DIGEST_SIZE; + init = (void *) ccp_sha224_init; + ctx_size = SHA256_DIGEST_SIZE; + sb_count = 1; + ioffset = 0; + if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) + ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + else + ooffset = 0; + break; + case CCP_SHA_TYPE_256: + digest_size = SHA256_DIGEST_SIZE; + init = (void *) ccp_sha256_init; + ctx_size = SHA256_DIGEST_SIZE; + sb_count = 1; + ooffset = ioffset = 0; + break; + default: + ret = -EINVAL; + goto e_data; + } - BUILD_BUG_ON(CCP_SHA_SB_COUNT != 1); + /* For zero-length plaintext the src pointer is ignored; + * otherwise both parts must be valid + */ + if (sha->src_len && !sha->src) + return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); - op.sb_ctx = cmd_q->sb_ctx; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.u.sha.type = sha->type; op.u.sha.msg_bits = sha->msg_bits; - /* The SHA context fits in a single (32-byte) SB entry and - * must be in little endian format. Use the 256-bit byte swap - * passthru option to convert from big endian to little endian. 
- */ - ret = ccp_init_dm_workarea(&ctx, cmd_q, - CCP_SHA_SB_COUNT * CCP_SB_BYTES, + ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) return ret; - if (sha->first) { - const __be32 *init; - switch (sha->type) { case CCP_SHA_TYPE_1: - init = ccp_sha1_init; - break; case CCP_SHA_TYPE_224: - init = ccp_sha224_init; - break; case CCP_SHA_TYPE_256: - init = ccp_sha256_init; + memcpy(ctx.address + ioffset, init, ctx_size); break; default: ret = -EINVAL; goto e_ctx; } - memcpy(ctx.address, init, CCP_SHA_CTXSIZE); } else { - ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); + /* Restore the context */ + ccp_set_dm_area(&ctx, 0, sha->ctx, 0, + sb_count * CCP_SB_BYTES); } ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, @@ -1037,24 +1096,33 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_ctx; } - /* Send data to the CCP SHA engine */ - ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, - CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE); - if (ret) - goto e_ctx; + if (sha->src) { + /* Send data to the CCP SHA engine; block_size is set above */ + ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, + block_size, DMA_TO_DEVICE); + if (ret) + goto e_ctx; - while (src.sg_wa.bytes_left) { - ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false); - if (sha->final && !src.sg_wa.bytes_left) - op.eom = 1; + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, block_size, false); + if (sha->final && !src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sha(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + ccp_process_data(&src, NULL, &op); + } + } else { + op.eom = 1; ret = cmd_q->ccp->vdata->perform->sha(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } - - ccp_process_data(&src, NULL, &op); } /* Retrieve the SHA context - convert from LE to BE using @@ -1067,32 +1135,31 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_data; } - ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); - - if (sha->final && sha->opad) { - /* HMAC operation, recursively perform final SHA */ - struct ccp_cmd hmac_cmd; - struct scatterlist sg; - u64 block_size, digest_size; - u8 *hmac_buf; - + if (sha->final) { + /* Finishing up, so get the digest */ switch (sha->type) { case CCP_SHA_TYPE_1: - block_size = SHA1_BLOCK_SIZE; - digest_size = SHA1_DIGEST_SIZE; - break; case CCP_SHA_TYPE_224: - block_size = SHA224_BLOCK_SIZE; - digest_size = SHA224_DIGEST_SIZE; - break; case CCP_SHA_TYPE_256: - block_size = SHA256_BLOCK_SIZE; - digest_size = SHA256_DIGEST_SIZE; + ccp_get_dm_area(&ctx, ooffset, + sha->ctx, 0, + digest_size); break; default: ret = -EINVAL; - goto e_data; + goto e_ctx; } + } else { + /* Stash the context */ + ccp_get_dm_area(&ctx, 0, sha->ctx, 0, + sb_count * CCP_SB_BYTES); + } + + if (sha->final && sha->opad) { + /* HMAC operation, recursively perform final SHA */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf; if (sha->opad_len != block_size) { ret = -EINVAL; @@ -1107,7 +1174,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) sg_init_one(&sg, hmac_buf, block_size + digest_size); scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); - memcpy(hmac_buf + block_size, ctx.address, digest_size); + switch (sha->type) { + case CCP_SHA_TYPE_1: + case CCP_SHA_TYPE_224: + case CCP_SHA_TYPE_256: + memcpy(hmac_buf + block_size, + ctx.address + ooffset, + 
digest_size); + break; + default: + ret = -EINVAL; + goto e_ctx; + } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); hmac_cmd.engine = CCP_ENGINE_SHA; @@ -1130,7 +1208,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } e_data: - ccp_free_data(&src, cmd_q); + if (sha->src) + ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); @@ -1261,7 +1340,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_op op; bool in_place = false; unsigned int i; - int ret; + int ret = 0; if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) return -EINVAL; @@ -1280,7 +1359,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ @@ -1469,7 +1548,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input @@ -1594,7 +1673,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; - op.jobid = ccp_gen_jobid(cmd_q->ccp); + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input @@ -1632,7 +1711,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; - /* Set the first point Z coordianate to 1 */ + /* Set the first point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; @@ -1651,7 +1730,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; - /* Set the second point Z coordianate to 1 */ + /* Set the second point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; } else { diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 072bcedef386..064e20f78b10 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -141,10 +141,11 @@ static void ccp_free_irqs(struct ccp_device *ccp) free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, dev); pci_disable_msix(pdev); - } else { + } else if (ccp->irq) { free_irq(ccp->irq, dev); pci_disable_msi(pdev); } + ccp->irq = 0; } static int ccp_find_mmio_area(struct ccp_device *ccp) @@ -229,6 +230,8 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_set_drvdata(dev, ccp); + if (ccp->vdata->setup) + ccp->vdata->setup(ccp); ret = ccp->vdata->perform->init(ccp); if (ret) goto e_iomap; @@ -321,6 +324,7 @@ static int ccp_pci_resume(struct pci_dev *pdev) static const struct pci_device_id ccp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, + { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 }, /* Last entry must be zero */ { 0, } }; diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 7c2bb27c067c..a7653339fedb 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -238,9 +238,6 @@ struct ccp_xts_aes_engine { }; /***** SHA engine *****/ -#define CCP_SHA_BLOCKSIZE SHA256_BLOCK_SIZE -#define CCP_SHA_CTXSIZE SHA256_DIGEST_SIZE - /** * ccp_sha_type - type 
of SHA operation * -- cgit v1.2.3-58-ga151 From 084935b208f6507ef5214fd67052a67a700bc6cf Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:10:31 -0500 Subject: crypto: ccp - Add support for the RNG in a version 5 CCP Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v3.c | 13 ++++--------- drivers/crypto/ccp/ccp-dev-v5.c | 7 +++++++ drivers/crypto/ccp/ccp-dev.c | 23 +++++++++++++++++++++++ drivers/crypto/ccp/ccp-dev.h | 2 ++ 4 files changed, 36 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c index ff2d2a4de16a..578522d8f22e 100644 --- a/drivers/crypto/ccp/ccp-dev-v3.c +++ b/drivers/crypto/ccp/ccp-dev-v3.c @@ -432,14 +432,9 @@ static int ccp_init(struct ccp_device *ccp) dev_dbg(dev, "Registering device...\n"); ccp_add_device(ccp); - /* Register the RNG */ - ccp->hwrng.name = ccp->rngname; - ccp->hwrng.read = ccp_trng_read; - ret = hwrng_register(&ccp->hwrng); - if (ret) { - dev_err(dev, "error registering hwrng (%d)\n", ret); + ret = ccp_register_rng(ccp); + if (ret) goto e_kthread; - } /* Register the DMA engine support */ ret = ccp_dmaengine_register(ccp); @@ -449,7 +444,7 @@ static int ccp_init(struct ccp_device *ccp) return 0; e_hwrng: - hwrng_unregister(&ccp->hwrng); + ccp_unregister_rng(ccp); e_kthread: for (i = 0; i < ccp->cmd_q_count; i++) @@ -475,7 +470,7 @@ static void ccp_destroy(struct ccp_device *ccp) ccp_dmaengine_unregister(ccp); /* Unregister the RNG */ - hwrng_unregister(&ccp->hwrng); + ccp_unregister_rng(ccp); /* Remove this device from the list of available units */ ccp_del_device(ccp); diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 16dad9633754..ddce2205e5fa 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -828,6 +828,10 @@ static int ccp5_init(struct ccp_device *ccp) /* Put this on the unit list to make it available */ ccp_add_device(ccp); + ret = ccp_register_rng(ccp); + if (ret) + goto e_kthread; + return 0; e_kthread: @@ -852,6 +856,9 @@ static void ccp5_destroy(struct ccp_device *ccp) struct ccp_cmd *cmd; unsigned int i; + /* Unregister the RNG */ + ccp_unregister_rng(ccp); + /* Remove this device from the list of available units first */ ccp_del_device(ccp); diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 6b44730ef9d6..38a98d879f82 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -119,6 +119,29 @@ void ccp_del_device(struct ccp_device *ccp) write_unlock_irqrestore(&ccp_unit_lock, flags); } + + +int ccp_register_rng(struct ccp_device *ccp) +{ + int ret = 0; + + dev_dbg(ccp->dev, "Registering RNG...\n"); + /* Register an RNG */ + ccp->hwrng.name = ccp->rngname; + ccp->hwrng.read = ccp_trng_read; + ret = hwrng_register(&ccp->hwrng); + if (ret) + dev_err(ccp->dev, "error registering hwrng (%d)\n", ret); + + return ret; +} + +void ccp_unregister_rng(struct ccp_device *ccp) +{ + if (ccp->hwrng.name) + hwrng_unregister(&ccp->hwrng); +} + static struct ccp_device *ccp_get_device(void) { unsigned long flags; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5ff4a73e3bd4..d04bd61ed845 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -601,6 +601,8 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait); int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd); +int ccp_register_rng(struct ccp_device *ccp); +void 
ccp_unregister_rng(struct ccp_device *ccp); int ccp_dmaengine_register(struct ccp_device *ccp); void ccp_dmaengine_unregister(struct ccp_device *ccp); -- cgit v1.2.3-58-ga151 From 99d90b2ebd8b327c0c496798db99009b30c70945 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:10:40 -0500 Subject: crypto: ccp - Enable DMA service on a v5 CCP Every CCP is capable of providing general DMA services. Register the device as a provider. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v5.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index ddce2205e5fa..40867141aea8 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -832,6 +832,11 @@ static int ccp5_init(struct ccp_device *ccp) if (ret) goto e_kthread; + /* Register the DMA engine support */ + ret = ccp_dmaengine_register(ccp); + if (ret) + goto e_hwrng; + return 0; e_kthread: @@ -856,6 +861,9 @@ static void ccp5_destroy(struct ccp_device *ccp) struct ccp_cmd *cmd; unsigned int i; + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + /* Unregister the RNG */ ccp_unregister_rng(ccp); -- cgit v1.2.3-58-ga151 From e14e7d126765ce0156ab5e3b250b1270998c207d Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Tue, 26 Jul 2016 19:10:49 -0500 Subject: crypto: ccp - Enable use of the additional CCP A second CCP is available, identical to the first, with its own PCI ID. Make it available for use by the crypto subsystem, as well as for DMA activity and random number generation. This device is not pre-configured at boot time. The driver must configure it (during the probe) for use. Signed-off-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev-v5.c | 37 ++++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/ccp-dev.h | 8 ++++++++ drivers/crypto/ccp/ccp-pci.c | 2 ++ 3 files changed, 46 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 40867141aea8..f499e34df389 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -835,7 +835,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Register the DMA engine support */ ret = ccp_dmaengine_register(ccp); if (ret) - goto e_hwrng; + goto e_kthread; return 0; @@ -952,6 +952,33 @@ static void ccp5_config(struct ccp_device *ccp) iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); } +static void ccp5other_config(struct ccp_device *ccp) +{ + int i; + u32 rnd; + + /* We own all of the queues on the NTB CCP */ + + iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); + iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); + for (i = 0; i < 12; i++) { + rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); + iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); + } + + iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); + iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); + iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); + + iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + + iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); + + ccp5_config(ccp); +} + +/* Version 5 adds some function, but is essentially the same as v3 */ static const struct ccp_actions ccp5_actions = { .aes = ccp5_perform_aes, .xts_aes = ccp5_perform_xts_aes, @@ -974,3 +1001,11
@@ struct ccp_vdata ccpv5 = { .bar = 2, .offset = 0x0, }; + +struct ccp_vdata ccpv5other = { + .version = CCP_VERSION(5, 0), + .setup = ccp5other_config, + .perform = &ccp5_actions, + .bar = 2, + .offset = 0x0, +}; diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index d04bd61ed845..ebc93652833b 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -63,7 +63,9 @@ /* ------------------------ CCP Version 5 Specifics ------------------------ */ #define CMD5_QUEUE_MASK_OFFSET 0x00 +#define CMD5_QUEUE_PRIO_OFFSET 0x04 #define CMD5_REQID_CONFIG_OFFSET 0x08 +#define CMD5_CMD_TIMEOUT_OFFSET 0x10 #define LSB_PUBLIC_MASK_LO_OFFSET 0x18 #define LSB_PUBLIC_MASK_HI_OFFSET 0x1C #define LSB_PRIVATE_MASK_LO_OFFSET 0x20 @@ -83,6 +85,11 @@ #define CMD5_Q_ABORT_BASE 0x0114 #define CMD5_Q_AX_CACHE_BASE 0x0118 +#define CMD5_CONFIG_0_OFFSET 0x6000 +#define CMD5_TRNG_CTL_OFFSET 0x6008 +#define CMD5_AES_MASK_OFFSET 0x6010 +#define CMD5_CLK_GATE_CTL_OFFSET 0x603C + /* Address offset between two virtual queue registers */ #define CMD5_Q_STATUS_INCR 0x1000 @@ -634,5 +641,6 @@ struct ccp_vdata { extern struct ccp_vdata ccpv3; extern struct ccp_vdata ccpv5; +extern struct ccp_vdata ccpv5other; #endif diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 064e20f78b10..239cbf2630bf 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -232,6 +232,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ccp->vdata->setup) ccp->vdata->setup(ccp); + ret = ccp->vdata->perform->init(ccp); if (ret) goto e_iomap; @@ -325,6 +326,7 @@ static int ccp_pci_resume(struct pci_dev *pdev) static const struct pci_device_id ccp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 }, + { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5other }, /* Last entry must be zero */ { 0, } }; -- cgit v1.2.3-58-ga151 From 10badea259fab2ec91a5e6bf1e568a01e4cb5263 Mon Sep 17 00:00:00 2001 From: Will Thomas Date: Fri, 5 Aug 2016 14:00:14 +0100 Subject: crypto: img-hash - Fix null pointer exception Sporadic null pointer dereferences occurred when img_hash_dma_task() ran with no active request. Guard against a NULL hdev->req as well as a NULL ctx->sg. Signed-off-by: Will Thomas Reviewed-by: James Hartley Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 68e8aa90fe01..e5c941bdc693 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -361,7 +361,7 @@ static void img_hash_dma_task(unsigned long d) size_t nbytes, bleft, wsend, len, tbc; struct scatterlist tsg; - if (!ctx->sg) + if (!hdev->req || !ctx->sg) return; addr = sg_virt(ctx->sg); -- cgit v1.2.3-58-ga151 From dd4f677b0c89280e66ab9bebf97c8c90fbc1c330 Mon Sep 17 00:00:00 2001 From: Will Thomas Date: Fri, 5 Aug 2016 14:00:15 +0100 Subject: crypto: img-hash - Fix hash request context Move the zero-length buffer to the end of the structure so that writes through it can no longer overwrite the fallback request data that follows it. This is not a bug in itself, as the buffer is never used alongside the fallback, but the fragile layout should be changed. 
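The layout hazard can be shown with a few lines of standalone C. This is an illustrative sketch using GCC's zero-length array extension; the struct and member names are invented, not driver code:

#include <stdio.h>
#include <stddef.h>

/* bad: a zero-sized array placed mid-struct shares its offset with the
 * next member, so any store through buf lands on top of tail */
struct bad_layout {
	size_t len;
	char buf[0];
	int tail;
};

/* good: as the last member, buf only ever addresses the extra bytes
 * requested beyond sizeof(struct good_layout) at allocation time */
struct good_layout {
	size_t len;
	int tail;
	char buf[0];
};

int main(void)
{
	printf("bad:  buf at %zu, tail at %zu\n",
	       offsetof(struct bad_layout, buf),
	       offsetof(struct bad_layout, tail));
	printf("good: tail at %zu, buf at %zu\n",
	       offsetof(struct good_layout, tail),
	       offsetof(struct good_layout, buf));
	return 0;
}

Both offsets in the first struct print the same value, which is exactly how writes through buffer[] could reach fallback_req in the old img_hash_request_ctx layout.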
Signed-off-by: Will Thomas Reviewed-by: James Hartley Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index e5c941bdc693..de2b86efbfe4 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -102,8 +102,10 @@ struct img_hash_request_ctx { unsigned long op; size_t bufcnt; - u8 buffer[0] __aligned(sizeof(u32)); struct ahash_request fallback_req; + + /* Zero length buffer must remain last member of struct */ + u8 buffer[0] __aligned(sizeof(u32)); }; struct img_hash_ctx { -- cgit v1.2.3-58-ga151 From 302a1bee8fd71b01bcf4ca8395c115a60cef52bf Mon Sep 17 00:00:00 2001 From: Will Thomas Date: Fri, 5 Aug 2016 14:00:16 +0100 Subject: crypto: img-hash - Reconfigure DMA Burst length Burst length of 16 drives the hash accelerator out of spec and causes stability issues in some cases. Reduce it to 4 to stop data being lost. Signed-off-by: Will Thomas Reviewed-by: James Hartley Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index de2b86efbfe4..f8abbe3bc51c 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -71,6 +71,7 @@ #define DRIVER_FLAGS_MD5 BIT(21) #define IMG_HASH_QUEUE_LENGTH 20 +#define IMG_HASH_DMA_BURST 4 #define IMG_HASH_DMA_THRESHOLD 64 #ifdef __LITTLE_ENDIAN @@ -342,7 +343,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) dma_conf.direction = DMA_MEM_TO_DEV; dma_conf.dst_addr = hdev->bus_addr; dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dma_conf.dst_maxburst = 16; + dma_conf.dst_maxburst = IMG_HASH_DMA_BURST; dma_conf.device_fc = false; err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); -- cgit v1.2.3-58-ga151 From d084e13a1395cf65d105fb79911329750500f8c5 Mon Sep 17 00:00:00 2001 From: Govindraj Raja Date: Fri, 5 Aug 2016 14:00:17 +0100 Subject: crypto: img-hash - Add suspend resume hooks for img hash The img hash driver claims the sys and periph gate clocks, and these can be gated in system suspend scenarios. Add device PM ops so that img hash gates the clocks it claims across suspend and resume. 
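Before the patch itself, a minimal sketch of the pattern being added (system-sleep PM ops that gate a pair of clocks), extended with the resume-side error checking a fuller implementation would want; the my_hash_* names are illustrative, not the driver's:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>

struct my_hash_dev {
	struct clk *sys_clk;
	struct clk *hash_clk;
};

static int my_hash_suspend(struct device *dev)
{
	struct my_hash_dev *d = dev_get_drvdata(dev);

	/* reverse order of enable: gate the leaf clock first */
	clk_disable_unprepare(d->hash_clk);
	clk_disable_unprepare(d->sys_clk);
	return 0;
}

static int my_hash_resume(struct device *dev)
{
	struct my_hash_dev *d = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(d->sys_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(d->hash_clk);
	if (ret)
		clk_disable_unprepare(d->sys_clk);	/* undo on failure */
	return ret;
}

static SIMPLE_DEV_PM_OPS(my_hash_pm_ops, my_hash_suspend, my_hash_resume);

The driver's actual hooks below follow the same shape but do not check the clk_prepare_enable() return values on resume.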
Signed-off-by: Govindraj Raja Reviewed-by: Will Thomas Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index f8abbe3bc51c..2622c0174232 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -1016,11 +1016,38 @@ static int img_hash_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int img_hash_suspend(struct device *dev) +{ + struct img_hash_dev *hdev = dev_get_drvdata(dev); + + clk_disable_unprepare(hdev->hash_clk); + clk_disable_unprepare(hdev->sys_clk); + + return 0; +} + +static int img_hash_resume(struct device *dev) +{ + struct img_hash_dev *hdev = dev_get_drvdata(dev); + + clk_prepare_enable(hdev->hash_clk); + clk_prepare_enable(hdev->sys_clk); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops img_hash_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume) +}; + static struct platform_driver img_hash_driver = { .probe = img_hash_probe, .remove = img_hash_remove, .driver = { .name = "img-hash-accelerator", + .pm = &img_hash_pm_ops, .of_match_table = of_match_ptr(img_hash_match), } }; -- cgit v1.2.3-58-ga151 From 436e3bb52b5b8f0823746cb06e6385d0db7f0811 Mon Sep 17 00:00:00 2001 From: James Hartley Date: Fri, 5 Aug 2016 14:00:18 +0100 Subject: crypto: img-hash - Add support for export and import Currently the img-hash accelerator does not probe successfully due to a change in the checks made during registration with the crypto framework. This is due to import and export functions not being defined. Correct this. Signed-off-by: James Hartley Signed-off-by: Will Thomas Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 69 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 2622c0174232..fd4cd5120f9e 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -590,6 +590,32 @@ static int img_hash_finup(struct ahash_request *req) return crypto_ahash_finup(&rctx->fallback_req); } +static int img_hash_import(struct ahash_request *req, const void *in) +{ + struct img_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); + rctx->fallback_req.base.flags = req->base.flags + & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_import(&rctx->fallback_req, in); +} + +static int img_hash_export(struct ahash_request *req, void *out) +{ + struct img_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); + rctx->fallback_req.base.flags = req->base.flags + & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_ahash_export(&rctx->fallback_req, out); +} + static int img_hash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -646,10 +672,9 @@ static int img_hash_digest(struct ahash_request *req) return err; } -static int img_hash_cra_init(struct crypto_tfm *tfm) +static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name) { struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm); - const char *alg_name = crypto_tfm_alg_name(tfm); 
int err = -ENOMEM; ctx->fallback = crypto_alloc_ahash(alg_name, 0, @@ -669,6 +694,26 @@ err: return err; } +static int img_hash_cra_md5_init(struct crypto_tfm *tfm) +{ + return img_hash_cra_init(tfm, "md5-generic"); +} + +static int img_hash_cra_sha1_init(struct crypto_tfm *tfm) +{ + return img_hash_cra_init(tfm, "sha1-generic"); +} + +static int img_hash_cra_sha224_init(struct crypto_tfm *tfm) +{ + return img_hash_cra_init(tfm, "sha224-generic"); +} + +static int img_hash_cra_sha256_init(struct crypto_tfm *tfm) +{ + return img_hash_cra_init(tfm, "sha256-generic"); +} + static void img_hash_cra_exit(struct crypto_tfm *tfm) { struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm); @@ -714,9 +759,12 @@ static struct ahash_alg img_algs[] = { .update = img_hash_update, .final = img_hash_final, .finup = img_hash_finup, + .export = img_hash_export, + .import = img_hash_import, .digest = img_hash_digest, .halg = { .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "img-md5", @@ -726,7 +774,7 @@ static struct ahash_alg img_algs[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct img_hash_ctx), - .cra_init = img_hash_cra_init, + .cra_init = img_hash_cra_md5_init, .cra_exit = img_hash_cra_exit, .cra_module = THIS_MODULE, } @@ -737,9 +785,12 @@ static struct ahash_alg img_algs[] = { .update = img_hash_update, .final = img_hash_final, .finup = img_hash_finup, + .export = img_hash_export, + .import = img_hash_import, .digest = img_hash_digest, .halg = { .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "img-sha1", @@ -749,7 +800,7 @@ static struct ahash_alg img_algs[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct img_hash_ctx), - .cra_init = img_hash_cra_init, + .cra_init = img_hash_cra_sha1_init, .cra_exit = img_hash_cra_exit, .cra_module = THIS_MODULE, } @@ -760,9 +811,12 @@ static struct ahash_alg img_algs[] = { .update = img_hash_update, .final = img_hash_final, .finup = img_hash_finup, + .export = img_hash_export, + .import = img_hash_import, .digest = img_hash_digest, .halg = { .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "img-sha224", @@ -772,7 +826,7 @@ static struct ahash_alg img_algs[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct img_hash_ctx), - .cra_init = img_hash_cra_init, + .cra_init = img_hash_cra_sha224_init, .cra_exit = img_hash_cra_exit, .cra_module = THIS_MODULE, } @@ -783,9 +837,12 @@ static struct ahash_alg img_algs[] = { .update = img_hash_update, .final = img_hash_final, .finup = img_hash_finup, + .export = img_hash_export, + .import = img_hash_import, .digest = img_hash_digest, .halg = { .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "img-sha256", @@ -795,7 +852,7 @@ static struct ahash_alg img_algs[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct img_hash_ctx), - .cra_init = img_hash_cra_init, + .cra_init = img_hash_cra_sha256_init, .cra_exit = img_hash_cra_exit, .cra_module = THIS_MODULE, } -- cgit v1.2.3-58-ga151 From fb67740e2fb82c97cca6df24882df4e7c898aae4 Mon Sep 17 00:00:00 2001 From: James Hartley Date: Fri, 5 Aug 2016 14:00:19 +0100 Subject: crypto: img-hash - log a successful probe 
Currently the probe function only emits an output on success when debug is specifically enabled. It would be more useful if this happened by default. Signed-off-by: James Hartley Reviewed-by: Will Thomas Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index fd4cd5120f9e..60410d79bbc5 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -1031,7 +1031,7 @@ static int img_hash_probe(struct platform_device *pdev) err = img_register_algs(hdev); if (err) goto err_algs; - dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n"); + dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n"); return 0; -- cgit v1.2.3-58-ga151 From 5e38d20088d48b60775bdbdfdf47f8a2c4f6288f Mon Sep 17 00:00:00 2001 From: Will Thomas Date: Fri, 5 Aug 2016 14:00:20 +0100 Subject: crypto: img-hash - Fix set_reqsize call Allocate enough request memory to account for the fallback's request context as well. Signed-off-by: Will Thomas Signed-off-by: Herbert Xu --- drivers/crypto/img-hash.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 60410d79bbc5..a2e77b87485b 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -686,6 +686,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name) } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct img_hash_request_ctx) + + crypto_ahash_reqsize(ctx->fallback) + IMG_HASH_DMA_THRESHOLD); return 0; -- cgit v1.2.3-58-ga151 From 3d5a2db695574a3780d15e42f771f35344258d8b Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:04:31 +0100 Subject: crypto: caam - fix DMA API mapping leak caamhash contains this weird code: src_nents = sg_count(req->src, req->nbytes); dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); ... edesc->src_nents = src_nents; sg_count() returns zero when sg_nents_for_len() returns zero or one. A return of zero means we don't need to use a hardware scatterlist. However, setting src_nents to zero causes problems when we unmap: if (edesc->src_nents) dma_unmap_sg_chained(dev, req->src, edesc->src_nents, DMA_TO_DEVICE, edesc->chained); as zero here means that we have no entries to unmap. This causes us to leak DMA mappings, where we map one scatterlist entry and then fail to unmap it. This can be fixed in two ways: either by writing the number of entries that were requested of dma_map_sg(), or by reworking the "no SG required" case. We adopt the re-work solution here - we replace sg_count() with sg_nents_for_len(), so src_nents now contains the real number of scatterlist entries, and we then change the test for using the hardware scatterlist to src_nents > 1 rather than just non-zero. This change passes my sshd and openssl tests, hashing /bin, as well as the tcrypt tests. 
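The DMA API rule at the heart of this, written out as an illustrative helper rather than caam code ('dev', 'sg' and 'len' stand in for whatever the caller has):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int do_one_transfer(struct device *dev, struct scatterlist *sg,
			   unsigned int len)
{
	int nents = sg_nents_for_len(sg, len);

	if (nents < 0)
		return nents;

	if (dma_map_sg(dev, sg, nents, DMA_TO_DEVICE) == 0)
		return -ENOMEM;

	/* ... run the job; a h/w scatter table is only needed when
	 * nents > 1 ... */

	/* unmap with the nents that was passed to dma_map_sg(), never
	 * with its return value and never with zero */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}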
Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 36365b3efdfd..24acd52b222e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1094,13 +1094,16 @@ static int ahash_digest(struct ahash_request *req) u32 options; int sh_len; - src_nents = sg_count(req->src, req->nbytes); + src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } - dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); - sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); + dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + if (src_nents > 1) + sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); + else + sec4_sg_bytes = 0; /* allocate space for base edesc and hw desc commands, link tables */ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN, @@ -1118,7 +1121,7 @@ static int ahash_digest(struct ahash_request *req) desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); - if (src_nents) { + if (src_nents > 1) { sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); @@ -1246,7 +1249,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) if (to_hash) { src_nents = sg_nents_for_len(req->src, - req->nbytes - (*next_buflen)); + req->nbytes - *next_buflen); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; @@ -1450,13 +1453,18 @@ static int ahash_update_first(struct ahash_request *req) to_hash = req->nbytes - *next_buflen; if (to_hash) { - src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); + src_nents = sg_nents_for_len(req->src, + req->nbytes - *next_buflen); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } - dma_map_sg(jrdev, req->src, src_nents ? 
: 1, DMA_TO_DEVICE); - sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); + dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); + if (src_nents > 1) + sec4_sg_bytes = src_nents * + sizeof(struct sec4_sg_entry); + else + sec4_sg_bytes = 0; /* * allocate space for base edesc and hw desc commands, @@ -1476,7 +1484,7 @@ static int ahash_update_first(struct ahash_request *req) DESC_JOB_IO_LEN; edesc->dst_dma = 0; - if (src_nents) { + if (src_nents > 1) { sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, -- cgit v1.2.3-58-ga151 From e11793f5dad8225023d8b46ac93a98ec14cebcde Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:04:36 +0100 Subject: crypto: caam - ensure descriptor buffers are cacheline aligned Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 24acd52b222e..ff91efd98dda 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -99,17 +99,17 @@ static struct list_head hash_list; /* ahash per-session context */ struct caam_hash_ctx { - struct device *jrdev; - u32 sh_desc_update[DESC_HASH_MAX_USED_LEN]; - u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN]; - u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN]; - u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN]; - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN]; - dma_addr_t sh_desc_update_dma; + u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + dma_addr_t sh_desc_update_dma ____cacheline_aligned; dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_digest_dma; dma_addr_t sh_desc_finup_dma; + struct device *jrdev; u32 alg_type; u32 alg_op; u8 key[CAAM_MAX_HASH_KEY_SIZE]; -- cgit v1.2.3-58-ga151 From 64ce56cb44f8b13df4d8f47b8ea527591a68a03b Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:04:42 +0100 Subject: crypto: caam - incorporate job descriptor into struct ahash_edesc Rather than giving the descriptor as hw_desc[0], give its real size. All places where we allocate an ahash_edesc incorporate DESC_JOB_IO_LEN bytes of job descriptor. 
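Reduced to a sketch (members trimmed, not the full caam structure, and assuming the driver's u32 and DESC_JOB_IO_LEN definitions), the change swaps manual size arithmetic for the compiler's:

/* before: zero-sized tail, descriptor bytes added by hand */
struct old_edesc {
	int sec4_sg_bytes;
	u32 hw_desc[0];
};
/* edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, ...); */

/* after: hw_desc has its real size, so sizeof(*edesc) includes it */
struct new_edesc {
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)];
};
/* edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, ...); */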
Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 49 ++++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ff91efd98dda..683a945e10aa 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -604,7 +604,7 @@ struct ahash_edesc { int src_nents; int sec4_sg_bytes; struct sec4_sg_entry *sec4_sg; - u32 hw_desc[0]; + u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)]; }; static inline void ahash_unmap(struct device *dev, @@ -815,8 +815,8 @@ static int ahash_update_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); @@ -825,8 +825,7 @@ static int ahash_update_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); @@ -925,8 +924,7 @@ static int ahash_final_ctx(struct ahash_request *req) sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -937,8 +935,7 @@ static int ahash_final_ctx(struct ahash_request *req) init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); edesc->src_nents = 0; ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, @@ -1016,8 +1013,7 @@ static int ahash_finup_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1029,8 +1025,7 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); @@ -1106,14 +1101,12 @@ static int ahash_digest(struct ahash_request *req) sec4_sg_bytes = 0; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; } - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); edesc->sec4_sg_bytes = 
sec4_sg_bytes; edesc->src_nents = src_nents; @@ -1179,7 +1172,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) int sh_len; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc), GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1261,8 +1254,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); @@ -1271,8 +1264,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); edesc->dst_dma = 0; state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, @@ -1371,8 +1363,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, - GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1384,8 +1375,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, state->buf_dma, buflen, @@ -1470,8 +1460,8 @@ static int ahash_update_first(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); @@ -1480,8 +1470,7 @@ static int ahash_update_first(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + - DESC_JOB_IO_LEN; + edesc->sec4_sg = (void *)(edesc + 1); edesc->dst_dma = 0; if (src_nents > 1) { -- cgit v1.2.3-58-ga151 From d7b24ed4a9a4250f483f4c2fea3588828ab43af2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:04:47 +0100 Subject: crypto: caam - mark the hardware descriptor as cache line aligned Mark the hardware descriptor as being cache line aligned; on DMA incoherent architectures, the hardware descriptor should sit in a separate cache line from the CPU accessed data to avoid polluting the caches. 
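The underlying rule, shown as an illustrative declaration rather than the caam one: on a DMA-incoherent machine, cache maintenance operates on whole lines, so a buffer the device owns must not share a line with fields the CPU may touch concurrently.

#include <linux/cache.h>
#include <linux/spinlock.h>

struct mixed_ownership {
	spinlock_t lock;	/* CPU-owned; may be written at any time */

	/* ____cacheline_aligned pushes the device-owned buffer onto its
	 * own cache line; otherwise a cache invalidate issued for the
	 * device could discard a CPU update to 'lock', and a writeback
	 * of 'lock' could overwrite data the device just produced */
	u8 desc[64] ____cacheline_aligned;
};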
Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 683a945e10aa..faf3d1bbcce9 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -604,7 +604,7 @@ struct ahash_edesc { int src_nents; int sec4_sg_bytes; struct sec4_sg_entry *sec4_sg; - u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)]; + u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; }; static inline void ahash_unmap(struct device *dev, -- cgit v1.2.3-58-ga151 From 343e44b15ecce6bca4b2caeae613a559e5be2720 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:04:52 +0100 Subject: crypto: caam - replace sec4_sg pointer with array Since the extended descriptor includes the hardware descriptor, and the sec4 scatterlist immediately follows this, we can declare it as an array at the very end of the extended descriptor. This allows us to get rid of an initialiser for every site where we allocate an extended descriptor. Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index faf3d1bbcce9..aaaa3724e1f8 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -595,16 +595,16 @@ badkey: * @sec4_sg_dma: physical mapped address of h/w link table * @src_nents: number of segments in input scatterlist * @sec4_sg_bytes: length of dma mapped sec4_sg space - * @sec4_sg: pointer to h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables + * @sec4_sg: h/w link table */ struct ahash_edesc { dma_addr_t dst_dma; dma_addr_t sec4_sg_dma; int src_nents; int sec4_sg_bytes; - struct sec4_sg_entry *sec4_sg; u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; + struct sec4_sg_entry sec4_sg[0]; }; static inline void ahash_unmap(struct device *dev, @@ -825,7 +825,6 @@ static int ahash_update_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)(edesc + 1); ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); @@ -935,7 +934,6 @@ static int ahash_final_ctx(struct ahash_request *req) init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)(edesc + 1); edesc->src_nents = 0; ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, @@ -1025,7 +1023,6 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)(edesc + 1); ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); @@ -1106,7 +1103,7 @@ static int ahash_digest(struct ahash_request *req) dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; } - edesc->sec4_sg = (void *)(edesc + 1); + edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->src_nents = src_nents; @@ -1264,7 +1261,6 @@ static int ahash_update_no_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)(edesc + 1); edesc->dst_dma = 0; state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, @@ -1375,7 +1371,6 @@ static int 
ahash_finup_no_ctx(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)(edesc + 1); state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, state->buf_dma, buflen, @@ -1470,7 +1465,6 @@ static int ahash_update_first(struct ahash_request *req) edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; - edesc->sec4_sg = (void *)(edesc + 1); edesc->dst_dma = 0; if (src_nents > 1) { -- cgit v1.2.3-58-ga151 From 32686d34f8fb6919df491ddb7ad8a0d6a9164624 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:04:58 +0100 Subject: crypto: caam - ensure that we clean up after an error Ensure that we clean up allocations and DMA mappings after encountering an error rather than just giving up and leaking memory and resources. Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 132 ++++++++++++++++++++++++----------------- 1 file changed, 79 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index aaaa3724e1f8..9b992b3d1117 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash_request *req) ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) - return ret; + goto err; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, @@ -860,7 +860,8 @@ static int ahash_update_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + @@ -875,13 +876,10 @@ static int ahash_update_ctx(struct ahash_request *req) #endif ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); - if (!ret) { - ret = -EINPROGRESS; - } else { - ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, - DMA_BIDIRECTIONAL); - kfree(edesc); - } + if (ret) + goto err; + + ret = -EINPROGRESS; } else if (*next_buflen) { scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); @@ -897,6 +895,11 @@ static int ahash_update_ctx(struct ahash_request *req) #endif return ret; + + err: + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); + kfree(edesc); + return ret; } static int ahash_final_ctx(struct ahash_request *req) @@ -939,7 +942,7 @@ static int ahash_final_ctx(struct ahash_request *req) ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); if (ret) - return ret; + goto err; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, @@ -951,7 +954,8 @@ static int ahash_final_ctx(struct ahash_request *req) sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, @@ -961,7 +965,8 @@ static int ahash_final_ctx(struct ahash_request *req) digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } #ifdef DEBUG @@ -970,13 +975,14 @@ static int ahash_final_ctx(struct ahash_request *req) #endif ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); - if (!ret) { - ret = -EINPROGRESS; - } else { - ahash_unmap_ctx(jrdev, 
edesc, req, digestsize, DMA_FROM_DEVICE); - kfree(edesc); - } + if (ret) + goto err; + return -EINPROGRESS; + +err: + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + kfree(edesc); return ret; } @@ -1027,7 +1033,7 @@ static int ahash_finup_ctx(struct ahash_request *req) ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); if (ret) - return ret; + goto err; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, @@ -1040,7 +1046,8 @@ static int ahash_finup_ctx(struct ahash_request *req) sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + @@ -1050,7 +1057,8 @@ static int ahash_finup_ctx(struct ahash_request *req) digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } #ifdef DEBUG @@ -1059,13 +1067,14 @@ static int ahash_finup_ctx(struct ahash_request *req) #endif ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); - if (!ret) { - ret = -EINPROGRESS; - } else { - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); - kfree(edesc); - } + if (ret) + goto err; + return -EINPROGRESS; + +err: + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + kfree(edesc); return ret; } @@ -1117,6 +1126,8 @@ static int ahash_digest(struct ahash_request *req) sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); return -ENOMEM; } src_dma = edesc->sec4_sg_dma; @@ -1131,6 +1142,8 @@ static int ahash_digest(struct ahash_request *req) digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); return -ENOMEM; } @@ -1183,6 +1196,8 @@ static int ahash_final_no_ctx(struct ahash_request *req) state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, state->buf_dma)) { dev_err(jrdev, "unable to map src\n"); + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); return -ENOMEM; } @@ -1192,6 +1207,8 @@ static int ahash_final_no_ctx(struct ahash_request *req) digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); return -ENOMEM; } edesc->src_nents = 0; @@ -1285,14 +1302,15 @@ static int ahash_update_no_ctx(struct ahash_request *req) DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) - return ret; + goto err; #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1301,16 +1319,13 @@ static int ahash_update_no_ctx(struct ahash_request *req) #endif ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); - if (!ret) { - ret = -EINPROGRESS; - state->update = ahash_update_ctx; - state->finup = ahash_finup_ctx; - state->final = ahash_final_ctx; - } else { - ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, - DMA_TO_DEVICE); - kfree(edesc); - } + if (ret) + goto err; + + ret = 
-EINPROGRESS; + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; } else if (*next_buflen) { scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); @@ -1326,6 +1341,11 @@ static int ahash_update_no_ctx(struct ahash_request *req) #endif return ret; + +err: + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); + kfree(edesc); + return ret; } /* submit ahash finup if it the first job descriptor after update */ @@ -1382,6 +1402,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); return -ENOMEM; } @@ -1392,6 +1414,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); return -ENOMEM; } @@ -1476,7 +1500,8 @@ static int ahash_update_first(struct ahash_request *req) DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } src_dma = edesc->sec4_sg_dma; options = LDST_SGF; @@ -1498,7 +1523,7 @@ static int ahash_update_first(struct ahash_request *req) ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) - return ret; + goto err; #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1506,18 +1531,14 @@ static int ahash_update_first(struct ahash_request *req) desc_bytes(desc), 1); #endif - ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, - req); - if (!ret) { - ret = -EINPROGRESS; - state->update = ahash_update_ctx; - state->finup = ahash_finup_ctx; - state->final = ahash_final_ctx; - } else { - ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, - DMA_TO_DEVICE); - kfree(edesc); - } + ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); + if (ret) + goto err; + + ret = -EINPROGRESS; + state->update = ahash_update_ctx; + state->finup = ahash_finup_ctx; + state->final = ahash_final_ctx; } else if (*next_buflen) { state->update = ahash_update_no_ctx; state->finup = ahash_finup_no_ctx; @@ -1532,6 +1553,11 @@ static int ahash_update_first(struct ahash_request *req) #endif return ret; + +err: + ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); + kfree(edesc); + return ret; } static int ahash_finup_first(struct ahash_request *req) -- cgit v1.2.3-58-ga151 From bc13c69e29697f90200cf77d133a39ca1e525590 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:05:03 +0100 Subject: crypto: caam - check and use dma_map_sg() return code Strictly, dma_map_sg() may coalesce SG entries, but in practice on iMX hardware, this will never happen. However, dma_map_sg() can fail, and we completely fail to check its return value. So, fix this properly. Arrange the code to map the scatterlist early, so we know how many scatter table entries to allocate, and then fill them in. This allows us to keep relatively simple error cleanup paths. 
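Condensed to a sketch (names borrowed from the driver, later error unwinding trimmed), the ordering the patch establishes is: count, map, then size everything from the mapped count, since dma_map_sg() may legally return fewer entries than it was given and zero on failure:

src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0)
	return src_nents;

if (src_nents) {
	mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
				  DMA_TO_DEVICE);
	if (!mapped_nents)
		return -ENOMEM;
} else {
	mapped_nents = 0;
}

/* the h/w scatter table is sized from what was actually mapped */
sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry);

edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
	dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
	return -ENOMEM;
}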
Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 138 ++++++++++++++++++++++++++++++----------- 1 file changed, 103 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 9b992b3d1117..de2224382d9c 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -187,15 +187,6 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, return buf_dma; } -/* Map req->src and put it in link table */ -static inline void src_map_to_sec4_sg(struct device *jrdev, - struct scatterlist *src, int src_nents, - struct sec4_sg_entry *sec4_sg) -{ - dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE); - sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); -} - /* * Only put buffer in link table if it contains data, which is possible, * since a buffer has previously been used, and needs to be unmapped, @@ -791,7 +782,7 @@ static int ahash_update_ctx(struct ahash_request *req) int in_len = *buflen + req->nbytes, to_hash; u32 *sh_desc = ctx->sh_desc_update, *desc; dma_addr_t ptr = ctx->sh_desc_update_dma; - int src_nents, sec4_sg_bytes, sec4_sg_src_index; + int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; struct ahash_edesc *edesc; int ret = 0; int sh_len; @@ -807,8 +798,20 @@ static int ahash_update_ctx(struct ahash_request *req) dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + sec4_sg_src_index = 1 + (*buflen ? 1 : 0); - sec4_sg_bytes = (sec4_sg_src_index + src_nents) * + sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * sizeof(struct sec4_sg_entry); /* @@ -820,6 +823,7 @@ static int ahash_update_ctx(struct ahash_request *req) if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -836,9 +840,10 @@ static int ahash_update_ctx(struct ahash_request *req) buf, state->buf_dma, *buflen, last_buflen); - if (src_nents) { - src_map_to_sec4_sg(jrdev, req->src, src_nents, - edesc->sec4_sg + sec4_sg_src_index); + if (mapped_nents) { + sg_to_sec4_sg_last(req->src, mapped_nents, + edesc->sec4_sg + sec4_sg_src_index, + 0); if (*next_buflen) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, @@ -1001,7 +1006,7 @@ static int ahash_finup_ctx(struct ahash_request *req) u32 *sh_desc = ctx->sh_desc_finup, *desc; dma_addr_t ptr = ctx->sh_desc_finup_dma; int sec4_sg_bytes, sec4_sg_src_index; - int src_nents; + int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = 0; @@ -1012,14 +1017,27 @@ static int ahash_finup_ctx(struct ahash_request *req) dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + sec4_sg_src_index = 1 + (buflen ? 
1 : 0); - sec4_sg_bytes = (sec4_sg_src_index + src_nents) * + sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1039,8 +1057,8 @@ static int ahash_finup_ctx(struct ahash_request *req) buf, state->buf_dma, buflen, last_buflen); - src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + - sec4_sg_src_index); + sg_to_sec4_sg_last(req->src, mapped_nents, + edesc->sec4_sg + sec4_sg_src_index, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); @@ -1088,7 +1106,7 @@ static int ahash_digest(struct ahash_request *req) u32 *sh_desc = ctx->sh_desc_digest, *desc; dma_addr_t ptr = ctx->sh_desc_digest_dma; int digestsize = crypto_ahash_digestsize(ahash); - int src_nents, sec4_sg_bytes; + int src_nents, mapped_nents, sec4_sg_bytes; dma_addr_t src_dma; struct ahash_edesc *edesc; int ret = 0; @@ -1100,9 +1118,20 @@ static int ahash_digest(struct ahash_request *req) dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } - dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); - if (src_nents > 1) - sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to map source for DMA\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + if (mapped_nents > 1) + sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry); else sec4_sg_bytes = 0; @@ -1110,6 +1139,7 @@ static int ahash_digest(struct ahash_request *req) edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1121,7 +1151,7 @@ static int ahash_digest(struct ahash_request *req) init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); if (src_nents > 1) { - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); + sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { @@ -1244,7 +1274,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) int *next_buflen = state->current_buf ? 
&state->buflen_0 : &state->buflen_1; int in_len = *buflen + req->nbytes, to_hash; - int sec4_sg_bytes, src_nents; + int sec4_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; u32 *desc, *sh_desc = ctx->sh_desc_update_first; dma_addr_t ptr = ctx->sh_desc_update_first_dma; @@ -1261,7 +1291,19 @@ static int ahash_update_no_ctx(struct ahash_request *req) dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } - sec4_sg_bytes = (1 + src_nents) * + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + + sec4_sg_bytes = (1 + mapped_nents) * sizeof(struct sec4_sg_entry); /* @@ -1273,6 +1315,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1282,8 +1325,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, *buflen); - src_map_to_sec4_sg(jrdev, req->src, src_nents, - edesc->sec4_sg + 1); + sg_to_sec4_sg_last(req->src, mapped_nents, + edesc->sec4_sg + 1, 0); + if (*next_buflen) { scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, @@ -1363,7 +1407,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) state->buflen_1; u32 *sh_desc = ctx->sh_desc_digest, *desc; dma_addr_t ptr = ctx->sh_desc_digest_dma; - int sec4_sg_bytes, sec4_sg_src_index, src_nents; + int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int sh_len; @@ -1374,14 +1418,27 @@ static int ahash_finup_no_ctx(struct ahash_request *req) dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } + + if (src_nents) { + mapped_nents = dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to DMA map source\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + sec4_sg_src_index = 2; - sec4_sg_bytes = (sec4_sg_src_index + src_nents) * + sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1396,7 +1453,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) state->buf_dma, buflen, last_buflen); - src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1); + sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + 1, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); @@ -1450,7 +1507,7 @@ static int ahash_update_first(struct ahash_request *req) int to_hash; u32 *sh_desc = ctx->sh_desc_update_first, *desc; dma_addr_t ptr = ctx->sh_desc_update_first_dma; - int sec4_sg_bytes, src_nents; + int sec4_sg_bytes, src_nents, mapped_nents; dma_addr_t src_dma; u32 options; struct ahash_edesc *edesc; @@ -1468,9 +1525,19 @@ static int ahash_update_first(struct ahash_request *req) dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } - dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); - if (src_nents > 1) - sec4_sg_bytes = src_nents * + + if (src_nents) { + mapped_nents = 
dma_map_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + if (!mapped_nents) { + dev_err(jrdev, "unable to map source for DMA\n"); + return -ENOMEM; + } + } else { + mapped_nents = 0; + } + if (mapped_nents > 1) + sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry); else sec4_sg_bytes = 0; @@ -1484,6 +1551,7 @@ static int ahash_update_first(struct ahash_request *req) if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1492,7 +1560,7 @@ static int ahash_update_first(struct ahash_request *req) edesc->dst_dma = 0; if (src_nents > 1) { - sg_to_sec4_sg_last(req->src, src_nents, + sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, -- cgit v1.2.3-58-ga151 From 5588d039b5ea35760ffc94a50ed3aa2027aec11d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:05:08 +0100 Subject: crypto: caam - add ahash_edesc_alloc() for descriptor allocation Add a helper function to perform the descriptor allocation. Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 60 +++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index de2224382d9c..ef332c723f0b 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -765,6 +765,25 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, req->base.complete(&req->base, err); } +/* + * Allocate an enhanced descriptor, which contains the hardware descriptor + * and space for hardware scatter table containing sg_num entries. 
+ */ +static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, + int sg_num, gfp_t flags) +{ + struct ahash_edesc *edesc; + unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); + + edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); + if (!edesc) { + dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); + return NULL; + } + + return edesc; +} + /* submit update job descriptor */ static int ahash_update_ctx(struct ahash_request *req) { @@ -818,11 +837,9 @@ static int ahash_update_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, - GFP_DMA | flags); + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, + flags); if (!edesc) { - dev_err(jrdev, - "could not allocate extended descriptor\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -931,11 +948,9 @@ static int ahash_final_ctx(struct ahash_request *req) sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); - if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, flags); + if (!edesc) return -ENOMEM; - } sh_len = desc_len(sh_desc); desc = edesc->hw_desc; @@ -1034,9 +1049,9 @@ static int ahash_finup_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, + flags); if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1136,9 +1151,9 @@ static int ahash_digest(struct ahash_request *req) sec4_sg_bytes = 0; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); + edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 
mapped_nents : 0, + flags); if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1212,13 +1227,10 @@ static int ahash_final_no_ctx(struct ahash_request *req) int sh_len; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc), GFP_DMA | flags); - if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); + edesc = ahash_edesc_alloc(ctx, 0, flags); + if (!edesc) return -ENOMEM; - } - edesc->sec4_sg_bytes = 0; sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); @@ -1310,11 +1322,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, - GFP_DMA | flags); + edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, flags); if (!edesc) { - dev_err(jrdev, - "could not allocate extended descriptor\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1435,9 +1444,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, GFP_DMA | flags); + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, flags); if (!edesc) { - dev_err(jrdev, "could not allocate extended descriptor\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } @@ -1546,11 +1554,9 @@ static int ahash_update_first(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes, - GFP_DMA | flags); + edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? + mapped_nents : 0, flags); if (!edesc) { - dev_err(jrdev, - "could not allocate extended descriptor\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } -- cgit v1.2.3-58-ga151 From 30a43b4498be67f89d81e3eded307bffe554952d Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:05:13 +0100 Subject: crypto: caam - move job descriptor initialisation to ahash_edesc_alloc() Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 84 +++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ef332c723f0b..aa863783d454 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -770,7 +770,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, * and space for hardware scatter table containing sg_num entries. */ static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, - int sg_num, gfp_t flags) + int sg_num, u32 *sh_desc, + dma_addr_t sh_desc_dma, + gfp_t flags) { struct ahash_edesc *edesc; unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); @@ -781,6 +783,9 @@ static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, return NULL; } + init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), + HDR_SHARE_DEFER | HDR_REVERSE); + return edesc; } @@ -799,12 +804,10 @@ static int ahash_update_ctx(struct ahash_request *req) int *next_buflen = state->current_buf ? 
&state->buflen_0 : &state->buflen_1, last_buflen; int in_len = *buflen + req->nbytes, to_hash; - u32 *sh_desc = ctx->sh_desc_update, *desc; - dma_addr_t ptr = ctx->sh_desc_update_dma; + u32 *desc; int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; struct ahash_edesc *edesc; int ret = 0; - int sh_len; last_buflen = *next_buflen; *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); @@ -838,7 +841,8 @@ static int ahash_update_ctx(struct ahash_request *req) * link tables */ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, - flags); + ctx->sh_desc_update, + ctx->sh_desc_update_dma, flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; @@ -872,10 +876,7 @@ static int ahash_update_ctx(struct ahash_request *req) state->current_buf = !state->current_buf; - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | - HDR_REVERSE); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, @@ -936,25 +937,23 @@ static int ahash_final_ctx(struct ahash_request *req) int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; int last_buflen = state->current_buf ? state->buflen_0 : state->buflen_1; - u32 *sh_desc = ctx->sh_desc_fin, *desc; - dma_addr_t ptr = ctx->sh_desc_fin_dma; + u32 *desc; int sec4_sg_bytes, sec4_sg_src_index; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = 0; - int sh_len; sec4_sg_src_index = 1 + (buflen ? 1 : 0); sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, flags); + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, + ctx->sh_desc_fin, ctx->sh_desc_fin_dma, + flags); if (!edesc) return -ENOMEM; - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->src_nents = 0; @@ -1018,14 +1017,12 @@ static int ahash_finup_ctx(struct ahash_request *req) int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; int last_buflen = state->current_buf ? state->buflen_0 : state->buflen_1; - u32 *sh_desc = ctx->sh_desc_finup, *desc; - dma_addr_t ptr = ctx->sh_desc_finup_dma; + u32 *desc; int sec4_sg_bytes, sec4_sg_src_index; int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = 0; - int sh_len; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -1050,15 +1047,14 @@ static int ahash_finup_ctx(struct ahash_request *req) /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, + ctx->sh_desc_finup, ctx->sh_desc_finup_dma, flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; @@ -1118,15 +1114,13 @@ static int ahash_digest(struct ahash_request *req) struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; - u32 *sh_desc = ctx->sh_desc_digest, *desc; - dma_addr_t ptr = ctx->sh_desc_digest_dma; + u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); int src_nents, mapped_nents, sec4_sg_bytes; dma_addr_t src_dma; struct ahash_edesc *edesc; int ret = 0; u32 options; - int sh_len; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -1152,6 +1146,7 @@ static int ahash_digest(struct ahash_request *req) /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, + ctx->sh_desc_digest, ctx->sh_desc_digest_dma, flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); @@ -1161,9 +1156,7 @@ static int ahash_digest(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->src_nents = src_nents; - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); if (src_nents > 1) { sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0); @@ -1219,21 +1212,18 @@ static int ahash_final_no_ctx(struct ahash_request *req) CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; - u32 *sh_desc = ctx->sh_desc_digest, *desc; - dma_addr_t ptr = ctx->sh_desc_digest_dma; + u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = 0; - int sh_len; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = ahash_edesc_alloc(ctx, 0, flags); + edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, + ctx->sh_desc_digest_dma, flags); if (!edesc) return -ENOMEM; - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, state->buf_dma)) { @@ -1288,10 +1278,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) int in_len = *buflen + req->nbytes, to_hash; int sec4_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; - u32 *desc, *sh_desc = ctx->sh_desc_update_first; - dma_addr_t ptr = ctx->sh_desc_update_first_dma; + u32 *desc; int ret = 0; - int sh_len; *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); to_hash = in_len - *next_buflen; @@ -1322,7 +1310,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, flags); + edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, + ctx->sh_desc_update_first, + ctx->sh_desc_update_first_dma, + flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; @@ -1345,10 +1336,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) state->current_buf = !state->current_buf; - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | - HDR_REVERSE); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, @@ -1414,12 +1402,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; int last_buflen = state->current_buf ? 
state->buflen_0 : state->buflen_1; - u32 *sh_desc = ctx->sh_desc_digest, *desc; - dma_addr_t ptr = ctx->sh_desc_digest_dma; + u32 *desc; int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; - int sh_len; int ret = 0; src_nents = sg_nents_for_len(req->src, req->nbytes); @@ -1444,15 +1430,15 @@ static int ahash_finup_no_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, flags); + edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, + ctx->sh_desc_digest, ctx->sh_desc_digest_dma, + flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; @@ -1513,14 +1499,12 @@ static int ahash_update_first(struct ahash_request *req) int *next_buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; int to_hash; - u32 *sh_desc = ctx->sh_desc_update_first, *desc; - dma_addr_t ptr = ctx->sh_desc_update_first_dma; + u32 *desc; int sec4_sg_bytes, src_nents, mapped_nents; dma_addr_t src_dma; u32 options; struct ahash_edesc *edesc; int ret = 0; - int sh_len; *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1); @@ -1555,7 +1539,10 @@ static int ahash_update_first(struct ahash_request *req) * link tables */ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? - mapped_nents : 0, flags); + mapped_nents : 0, + ctx->sh_desc_update_first, + ctx->sh_desc_update_first_dma, + flags); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; @@ -1588,10 +1575,7 @@ static int ahash_update_first(struct ahash_request *req) scatterwalk_map_and_copy(next_buf, req->src, to_hash, *next_buflen, 0); - sh_len = desc_len(sh_desc); desc = edesc->hw_desc; - init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | - HDR_REVERSE); append_seq_in_ptr(desc, src_dma, to_hash, options); -- cgit v1.2.3-58-ga151 From 65cf164a4afab86b9d26c063bc4cef5432b52021 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:05:19 +0100 Subject: crypto: caam - add ahash_edesc_add_src() Add a helper to map the source scatterlist into the descriptor. 
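In code terms, the helper boils down to one decision; the sketch below restates it with the names used in the diff that follows (it is an illustration of the pattern, not a drop-in copy of the driver function): a source scatterlist that collapses to a single mapped segment can be referenced by the job descriptor directly, while anything else needs a sec4 link table that is itself DMA-mapped and flagged with LDST_SGF.

static int ahash_add_src_sketch(struct caam_hash_ctx *ctx,
				struct ahash_edesc *edesc,
				struct ahash_request *req, int nents,
				unsigned int first_sg,
				unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		/* build the hardware link table after any prepended entries */
		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma))
			return -ENOMEM;	/* caller unmaps/frees, as in the driver */

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;	/* source is an S/G table */
	} else {
		src_dma = sg_dma_address(req->src);	/* single segment */
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);
	return 0;
}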
Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 137 +++++++++++++++++------------------------ 1 file changed, 57 insertions(+), 80 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index aa863783d454..9d7fc9ec0b7e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -789,6 +789,41 @@ static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, return edesc; } +static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, + struct ahash_edesc *edesc, + struct ahash_request *req, int nents, + unsigned int first_sg, + unsigned int first_bytes, size_t to_hash) +{ + dma_addr_t src_dma; + u32 options; + + if (nents > 1 || first_sg) { + struct sec4_sg_entry *sg = edesc->sec4_sg; + unsigned int sgsize = sizeof(*sg) * (first_sg + nents); + + sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); + + src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); + if (dma_mapping_error(ctx->jrdev, src_dma)) { + dev_err(ctx->jrdev, "unable to map S/G table\n"); + return -ENOMEM; + } + + edesc->sec4_sg_bytes = sgsize; + edesc->sec4_sg_dma = src_dma; + options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + options = 0; + } + + append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, + options); + + return 0; +} + /* submit update job descriptor */ static int ahash_update_ctx(struct ahash_request *req) { @@ -1018,7 +1053,7 @@ static int ahash_finup_ctx(struct ahash_request *req) int last_buflen = state->current_buf ? state->buflen_0 : state->buflen_1; u32 *desc; - int sec4_sg_bytes, sec4_sg_src_index; + int sec4_sg_src_index; int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; @@ -1042,8 +1077,6 @@ static int ahash_finup_ctx(struct ahash_request *req) } sec4_sg_src_index = 1 + (buflen ? 1 : 0); - sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * - sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, @@ -1057,7 +1090,6 @@ static int ahash_finup_ctx(struct ahash_request *req) desc = edesc->hw_desc; edesc->src_nents = src_nents; - edesc->sec4_sg_bytes = sec4_sg_bytes; ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); @@ -1068,19 +1100,11 @@ static int ahash_finup_ctx(struct ahash_request *req) buf, state->buf_dma, buflen, last_buflen); - sg_to_sec4_sg_last(req->src, mapped_nents, - edesc->sec4_sg + sec4_sg_src_index, 0); - - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - ret = -ENOMEM; + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, + sec4_sg_src_index, ctx->ctx_len + buflen, + req->nbytes); + if (ret) goto err; - } - - append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + - buflen + req->nbytes, LDST_SGF); edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); @@ -1116,11 +1140,9 @@ static int ahash_digest(struct ahash_request *req) CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); - int src_nents, mapped_nents, sec4_sg_bytes; - dma_addr_t src_dma; + int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; - u32 options; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -1139,11 +1161,6 @@ static int ahash_digest(struct ahash_request *req) mapped_nents = 0; } - if (mapped_nents > 1) - sec4_sg_bytes = mapped_nents * sizeof(struct sec4_sg_entry); - else - sec4_sg_bytes = 0; - /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, ctx->sh_desc_digest, ctx->sh_desc_digest_dma, @@ -1153,28 +1170,17 @@ static int ahash_digest(struct ahash_request *req) return -ENOMEM; } - edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->src_nents = src_nents; - desc = edesc->hw_desc; - - if (src_nents > 1) { - sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg, 0); - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - ahash_unmap(jrdev, edesc, req, digestsize); - kfree(edesc); - return -ENOMEM; - } - src_dma = edesc->sec4_sg_dma; - options = LDST_SGF; - } else { - src_dma = sg_dma_address(req->src); - options = 0; + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, + req->nbytes); + if (ret) { + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return ret; } - append_seq_in_ptr(desc, src_dma, req->nbytes, options); + + desc = edesc->hw_desc; edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); @@ -1447,20 +1453,15 @@ static int ahash_finup_no_ctx(struct ahash_request *req) state->buf_dma, buflen, last_buflen); - sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + 1, 0); - - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, + req->nbytes); + if (ret) { dev_err(jrdev, "unable to map S/G table\n"); ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; } - append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + - req->nbytes, LDST_SGF); - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { @@ -1500,9 +1501,7 @@ static int ahash_update_first(struct ahash_request *req) &state->buflen_1 : &state->buflen_0; int to_hash; u32 *desc; - int sec4_sg_bytes, src_nents, mapped_nents; - dma_addr_t src_dma; - u32 options; + int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; @@ -1528,11 +1527,6 @@ static int ahash_update_first(struct ahash_request *req) } else { mapped_nents = 0; } - if (mapped_nents > 1) - sec4_sg_bytes = mapped_nents * - sizeof(struct sec4_sg_entry); - else - sec4_sg_bytes = 0; /* * allocate space for base edesc and hw desc commands, @@ -1549,27 +1543,12 @@ static int ahash_update_first(struct ahash_request *req) } edesc->src_nents = src_nents; - edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->dst_dma = 0; - if (src_nents > 1) { - sg_to_sec4_sg_last(req->src, mapped_nents, - edesc->sec4_sg, 0); - edesc->sec4_sg_dma = dma_map_single(jrdev, - edesc->sec4_sg, - sec4_sg_bytes, - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - ret = -ENOMEM; - goto err; 
- } - src_dma = edesc->sec4_sg_dma; - options = LDST_SGF; - } else { - src_dma = sg_dma_address(req->src); - options = 0; - } + ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, + to_hash); + if (ret) + goto err; if (*next_buflen) scatterwalk_map_and_copy(next_buf, req->src, to_hash, @@ -1577,8 +1556,6 @@ static int ahash_update_first(struct ahash_request *req) desc = edesc->hw_desc; - append_seq_in_ptr(desc, src_dma, to_hash, options); - ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) goto err; -- cgit v1.2.3-58-ga151 From 66d2e2028091a074aa1290d2eeda5ddb1a6c329c Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 8 Aug 2016 18:05:24 +0100 Subject: crypto: caam - get rid of tasklet Threaded interrupts can perform the function of the tasklet, and much more safely too - without races when trying to take the tasklet and interrupt down on device removal. With the old code, there is a window where we call tasklet_kill(). If the interrupt handler happens to be running on a different CPU, and subsequently calls tasklet_schedule(), the tasklet will be re-scheduled for execution. Switching to a hardirq/threadirq combination implementation avoids this, and it also means generic code deals with the teardown sequencing of the threaded and non-threaded parts. Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/intern.h | 1 - drivers/crypto/caam/jr.c | 25 +++++++++---------------- 2 files changed, 9 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e2bcacc1a921..5d4c05074a5c 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -41,7 +41,6 @@ struct caam_drv_private_jr { struct device *dev; int ridx; struct caam_job_ring __iomem *rregs; /* JobR's register space */ - struct tasklet_struct irqtask; int irq; /* One per queue */ /* Number of scatterlist crypt transforms active on the JobR */ diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index a81f551ac222..320228875e9a 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -73,8 +73,6 @@ static int caam_jr_shutdown(struct device *dev) ret = caam_reset_hw_jr(dev); - tasklet_kill(&jrp->irqtask); - /* Release interrupt */ free_irq(jrp->irq, dev); @@ -130,7 +128,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) /* * Check the output ring for ready responses, kick - * tasklet if jobs done. + * the threaded irq if jobs done. 
*/ irqstate = rd_reg32(&jrp->rregs->jrintstatus); if (!irqstate) @@ -152,18 +150,13 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) /* Have valid interrupt at this point, just ACK and trigger */ wr_reg32(&jrp->rregs->jrintstatus, irqstate); - preempt_disable(); - tasklet_schedule(&jrp->irqtask); - preempt_enable(); - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } -/* Deferred service handler, run as interrupt-fired tasklet */ -static void caam_jr_dequeue(unsigned long devarg) +static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) { int hw_idx, sw_idx, i, head, tail; - struct device *dev = (struct device *)devarg; + struct device *dev = st_dev; struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); u32 *userdesc, userstatus; @@ -237,6 +230,8 @@ static void caam_jr_dequeue(unsigned long devarg) /* reenable / unmask IRQs */ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); + + return IRQ_HANDLED; } /** @@ -394,11 +389,10 @@ static int caam_jr_init(struct device *dev) jrp = dev_get_drvdata(dev); - tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); - /* Connect job ring interrupt handler. */ - error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, - dev_name(dev), dev); + error = request_threaded_irq(jrp->irq, caam_jr_interrupt, + caam_jr_threadirq, IRQF_SHARED, + dev_name(dev), dev); if (error) { dev_err(dev, "can't connect JobR %d interrupt (%d)\n", jrp->ridx, jrp->irq); @@ -460,7 +454,6 @@ out_free_inpring: out_free_irq: free_irq(jrp->irq, dev); out_kill_deq: - tasklet_kill(&jrp->irqtask); return error; } -- cgit v1.2.3-58-ga151 From bdc67da7947d4a8d8cb38939d88d315536bb4dfd Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 9 Aug 2016 08:30:10 +0100 Subject: crypto: caam - avoid kernel warnings on probe failure While debugging setkey issues, the following warnings were found while trying to reinsert the caam module. Fix this by avoiding the duplicated cleanup in the probe path after caam_remove(), which has already cleaned up the resources. 
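The shape of the fix, as a sketch of a hypothetical driver (all foo_* names are invented): once the error path has reached the label that invokes the full remove routine, it must return instead of falling through into the partial-unwind labels, otherwise the same iounmap()/clk_disable() calls run a second time and trigger warnings like those quoted below.

/* Illustrative probe only; foo_* helpers are made up. */
static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	priv->base = ioremap(priv->res->start, resource_size(priv->res));
	if (!priv->base) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	ret = foo_start_queue(priv);	/* made-up mid-probe step */
	if (ret)
		goto unmap;

	ret = foo_register_algs(priv);	/* made-up final step */
	if (ret)
		goto full_remove;

	return 0;

full_remove:
	foo_remove(pdev);	/* complete teardown: unmaps and disables clk */
	return ret;		/* the "return" the caam fix adds */

unmap:				/* partial unwind for earlier failures only */
	iounmap(priv->base);
disable_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}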
------------[ cut here ]------------ WARNING: CPU: 0 PID: 2346 at /home/rmk/git/linux-rmk/mm/vmalloc.c:1490 __vunmap+0xcc/0xf4 Trying to vfree() nonexistent vm area (f2400000) Modules linked in: caam(+) cbc rfcomm bnep bluetooth nfsd em28xx_rc si2157 si2168 em28xx_dvb uvcvideo snd_soc_imx_sgtl5000 em28xx snd_soc_imx_spdif tveeprom snd_soc_fsl_asoc_card snd_soc_imx_audmux snd_soc_sgtl5000 imx_sdma imx2_wdt coda v4l2_mem2mem videobuf2_dma_contig snd_soc_fsl_ssi rc_cec snd_soc_fsl_spdif imx_pcm_dma videobuf2_vmalloc videobuf2_memops imx_thermal dw_hdmi_ahb_audio dw_hdmi_cec etnaviv fuse rc_pinnacle_pctv_hd [last unloaded: caam] CPU: 0 PID: 2346 Comm: modprobe Tainted: G W 4.8.0-rc1+ #2014 Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree) Backtrace: [] (dump_backtrace) from [] (show_stack+0x18/0x1c) [] (show_stack) from [] (dump_stack+0xa4/0xdc) [] (dump_stack) from [] (__warn+0xdc/0x108) [] (__warn) from [] (warn_slowpath_fmt+0x40/0x48) [] (warn_slowpath_fmt) from [] (__vunmap+0xcc/0xf4) [] (__vunmap) from [] (vunmap+0x4c/0x54) [] (vunmap) from [] (__iounmap+0x2c/0x30) [] (__iounmap) from [] (iounmap+0x1c/0x20) [] (iounmap) from [] (caam_probe+0x3dc/0x1498 [caam]) [] (caam_probe [caam]) from [] (platform_drv_probe+0x58/0xb8) [] (platform_drv_probe) from [] (driver_probe_device+0x1fc/0x2b8) [] (driver_probe_device) from [] (__driver_attach+0xbc/0xc0) r10:00000000 r8:bf24b000 r7:00000000 r6:ef215844 r5:bf2490c4 r4:ef215810 [] (__driver_attach) from [] (bus_for_each_dev+0x5c/0x90) [] (bus_for_each_dev) from [] (driver_attach+0x24/0x28) [] (driver_attach) from [] (bus_add_driver+0xf4/0x200) [] (bus_add_driver) from [] (driver_register+0x80/0xfc) [] (driver_register) from [] (__platform_driver_register+0x48/0x4c) [] (__platform_driver_register) from [] (caam_driver_init+0x18/0x24 [caam]) [] (caam_driver_init [caam]) from [] (do_one_initcall+0x44/0x178) [] (do_one_initcall) from [] (do_init_module+0x68/0x1d8) [] (do_init_module) from [] (load_module+0x1974/0x20b0) [] (load_module) from [] (SyS_finit_module+0x94/0xa0) [] (SyS_finit_module) from [] (ret_fast_syscall+0x0/0x1c) ---[ end trace 34e3370d88bb1786 ]--- ------------[ cut here ]------------ WARNING: CPU: 0 PID: 2346 at /home/rmk/git/linux-rmk/drivers/clk/clk.c:594 clk_core_disable+0xe4/0x26c Modules linked in: caam(+) cbc rfcomm bnep bluetooth nfsd em28xx_rc si2157 si2168 em28xx_dvb uvcvideo snd_soc_imx_sgtl5000 em28xx snd_soc_imx_spdif tveeprom snd_soc_fsl_asoc_card snd_soc_imx_audmux snd_soc_sgtl5000 imx_sdma imx2_wdt coda v4l2_mem2mem videobuf2_dma_contig snd_soc_fsl_ssi rc_cec snd_soc_fsl_spdif imx_pcm_dma videobuf2_vmalloc videobuf2_memops imx_thermal dw_hdmi_ahb_audio dw_hdmi_cec etnaviv fuse rc_pinnacle_pctv_hd [last unloaded: caam] CPU: 0 PID: 2346 Comm: modprobe Tainted: G W 4.8.0-rc1+ #2014 Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree) Backtrace: [] (dump_backtrace) from [] (show_stack+0x18/0x1c) [] (show_stack) from [] (dump_stack+0xa4/0xdc) [] (dump_stack) from [] (__warn+0xdc/0x108) [] (__warn) from [] (warn_slowpath_null+0x28/0x30) [] (warn_slowpath_null) from [] (clk_core_disable+0xe4/0x26c) [] (clk_core_disable) from [] (clk_core_disable_lock+0x20/0x2c) [] (clk_core_disable_lock) from [] (clk_disable+0x24/0x28) [] (clk_disable) from [] (caam_probe+0x3fc/0x1498 [caam]) [] (caam_probe [caam]) from [] (platform_drv_probe+0x58/0xb8) [] (platform_drv_probe) from [] (driver_probe_device+0x1fc/0x2b8) [] (driver_probe_device) from [] (__driver_attach+0xbc/0xc0) r10:00000000 r8:bf24b000 r7:00000000 r6:ef215844 
r5:bf2490c4 r4:ef215810 [] (__driver_attach) from [] (bus_for_each_dev+0x5c/0x90) [] (bus_for_each_dev) from [] (driver_attach+0x24/0x28) [] (driver_attach) from [] (bus_add_driver+0xf4/0x200) [] (bus_add_driver) from [] (driver_register+0x80/0xfc) [] (driver_register) from [] (__platform_driver_register+0x48/0x4c) [] (__platform_driver_register) from [] (caam_driver_init+0x18/0x24 [caam]) [] (caam_driver_init [caam]) from [] (do_one_initcall+0x44/0x178) [] (do_one_initcall) from [] (do_init_module+0x68/0x1d8) [] (do_init_module) from [] (load_module+0x1974/0x20b0) [] (load_module) from [] (SyS_finit_module+0x94/0xa0) [] (SyS_finit_module) from [] (ret_fast_syscall+0x0/0x1c) ---[ end trace 34e3370d88bb1787 ]--- ------------[ cut here ]------------ WARNING: CPU: 0 PID: 2346 at /home/rmk/git/linux-rmk/drivers/clk/clk.c:476 clk_core_unprepare+0x204/0x388 Modules linked in: caam(+) cbc rfcomm bnep bluetooth nfsd em28xx_rc si2157 si2168 em28xx_dvb uvcvideo snd_soc_imx_sgtl5000 em28xx snd_soc_imx_spdif tveeprom snd_soc_fsl_asoc_card snd_soc_imx_audmux snd_soc_sgtl5000 imx_sdma imx2_wdt coda v4l2_mem2mem videobuf2_dma_contig snd_soc_fsl_ssi rc_cec snd_soc_fsl_spdif imx_pcm_dma videobuf2_vmalloc videobuf2_memops imx_thermal dw_hdmi_ahb_audio dw_hdmi_cec etnaviv fuse rc_pinnacle_pctv_hd [last unloaded: caam] CPU: 0 PID: 2346 Comm: modprobe Tainted: G W 4.8.0-rc1+ #2014 Hardware name: Freescale i.MX6 Quad/DualLite (Device Tree) Backtrace: [] (dump_backtrace) from [] (show_stack+0x18/0x1c) [] (show_stack) from [] (dump_stack+0xa4/0xdc) [] (dump_stack) from [] (__warn+0xdc/0x108) [] (__warn) from [] (warn_slowpath_null+0x28/0x30) [] (warn_slowpath_null) from [] (clk_core_unprepare+0x204/0x388) [] (clk_core_unprepare) from [] (clk_unprepare+0x2c/0x34) [] (clk_unprepare) from [] (caam_probe+0x404/0x1498 [caam]) [] (caam_probe [caam]) from [] (platform_drv_probe+0x58/0xb8) [] (platform_drv_probe) from [] (driver_probe_device+0x1fc/0x2b8) [] (driver_probe_device) from [] (__driver_attach+0xbc/0xc0) r10:00000000 r8:bf24b000 r7:00000000 r6:ef215844 r5:bf2490c4 r4:ef215810 [] (__driver_attach) from [] (bus_for_each_dev+0x5c/0x90) [] (bus_for_each_dev) from [] (driver_attach+0x24/0x28) [] (driver_attach) from [] (bus_add_driver+0xf4/0x200) [] (bus_add_driver) from [] (driver_register+0x80/0xfc) [] (driver_register) from [] (__platform_driver_register+0x48/0x4c) [] (__platform_driver_register) from [] (caam_driver_init+0x18/0x24 [caam]) [] (caam_driver_init [caam]) from [] (do_one_initcall+0x44/0x178) [] (do_one_initcall) from [] (do_init_module+0x68/0x1d8) [] (do_init_module) from [] (load_module+0x1974/0x20b0) [] (load_module) from [] (SyS_finit_module+0x94/0xa0) [] (SyS_finit_module) from [] (ret_fast_syscall+0x0/0x1c) ---[ end trace 34e3370d88bb1788 ]--- Signed-off-by: Russell King Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 0ec112ee5204..f4c044f5bcb2 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -826,6 +826,8 @@ static int caam_probe(struct platform_device *pdev) caam_remove: caam_remove(pdev); + return ret; + iounmap_ctrl: iounmap(ctrl); disable_caam_emi_slow: -- cgit v1.2.3-58-ga151 From 36225b9170ea7b25371063364721ed96b2a58f97 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 9 Aug 2016 11:03:14 +0200 Subject: crypto: marvell - be explicit about destination in mv_cesa_dma_add_op() The mv_cesa_dma_add_op() function 
builds a mv_cesa_tdma_desc structure to copy the operation description to the SRAM, but doesn't explicitly initialize the destination of the copy. It works fine because the operation description must be copied at the beginning of the SRAM, and the mv_cesa_tdma_desc structure is initialized to zero when allocated. However, it is somewhat confusing to not have a destination defined. Signed-off-by: Thomas Petazzoni Signed-off-by: Herbert Xu --- drivers/crypto/marvell/tdma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c index 86a065bcc187..9fd7a5fbaa1b 100644 --- a/drivers/crypto/marvell/tdma.c +++ b/drivers/crypto/marvell/tdma.c @@ -261,6 +261,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, tdma->op = op; tdma->byte_cnt = cpu_to_le32(size | BIT(31)); tdma->src = cpu_to_le32(dma_handle); + tdma->dst = CESA_SA_CFG_SRAM_OFFSET; tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; return op; -- cgit v1.2.3-58-ga151 From 2a8a78573b346b1da5f1b544985d65d1e08a6148 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 9 Aug 2016 11:03:15 +0200 Subject: crypto: marvell - remove unused parameter in mv_cesa_ahash_dma_add_cache() The dma_iter parameter of mv_cesa_ahash_dma_add_cache() is never used, so get rid of it. Signed-off-by: Thomas Petazzoni Signed-off-by: Herbert Xu --- drivers/crypto/marvell/hash.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 82e0f4e6eb1c..0d7f5f9ace14 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -455,7 +455,6 @@ mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain, static int mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, - struct mv_cesa_ahash_dma_iter *dma_iter, struct mv_cesa_ahash_req *creq, gfp_t flags) { @@ -586,7 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) * Add the cache (left-over data from a previous block) first. * This will never overflow the SRAM size. */ - ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags); + ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags); if (ret) goto err_free_tdma; -- cgit v1.2.3-58-ga151 From 3e5c66c9c31afecf1d6f7ef4ffc50ac3c1ff3c7c Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 9 Aug 2016 11:03:16 +0200 Subject: crypto: marvell - turn mv_cesa_ahash_init() into a function returning void The mv_cesa_ahash_init() function always returns 0, and the return value is never checked anyway. Turn it into a function returning void.
Signed-off-by: Thomas Petazzoni Signed-off-by: Herbert Xu --- drivers/crypto/marvell/hash.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 0d7f5f9ace14..729166435a8d 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -374,7 +374,7 @@ static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { .complete = mv_cesa_ahash_complete, }; -static int mv_cesa_ahash_init(struct ahash_request *req, +static void mv_cesa_ahash_init(struct ahash_request *req, struct mv_cesa_op_ctx *tmpl, bool algo_le) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); @@ -390,8 +390,6 @@ static int mv_cesa_ahash_init(struct ahash_request *req, creq->op_tmpl = *tmpl; creq->len = 0; creq->algo_le = algo_le; - - return 0; } static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) -- cgit v1.2.3-58-ga151 From 6dc156f4536b9f0abb764cb34e652730dcc4883b Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 9 Aug 2016 11:03:17 +0200 Subject: crypto: marvell - make mv_cesa_ahash_cache_req() return bool The mv_cesa_ahash_cache_req() function always returns 0, which makes its return value pretty much useless. However, in addition to returning a useless value, it also returns a boolean in a variable passed by reference to indicate if the request was already cached. So, this commit changes mv_cesa_ahash_cache_req() to return this boolean. It consequently simplifies the only call site of mv_cesa_ahash_cache_req(), where the "ret" variable is no longer needed. Signed-off-by: Thomas Petazzoni Signed-off-by: Herbert Xu --- drivers/crypto/marvell/hash.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 729166435a8d..cf8063d8c488 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -403,15 +403,16 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) return 0; } -static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) +static bool mv_cesa_ahash_cache_req(struct ahash_request *req) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + bool cached = false; if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) { - *cached = true; + cached = true; if (!req->nbytes) - return 0; + return cached; sg_pcopy_to_buffer(req->src, creq->src_nents, creq->cache + creq->cache_ptr, @@ -420,7 +421,7 @@ static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) creq->cache_ptr += req->nbytes; } - return 0; + return cached; } static struct mv_cesa_op_ctx * @@ -665,7 +666,6 @@ err: static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) { struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); - int ret; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); if (creq->src_nents < 0) { @@ -673,17 +673,15 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) return creq->src_nents; } - ret = mv_cesa_ahash_cache_req(req, cached); - if (ret) - return ret; + *cached = mv_cesa_ahash_cache_req(req); if (*cached) return 0; if (cesa_dev->caps->has_tdma) - ret = mv_cesa_ahash_dma_req_init(req); - - return ret; + return mv_cesa_ahash_dma_req_init(req); + else + return 0; } static int mv_cesa_ahash_queue_req(struct ahash_request *req) -- cgit v1.2.3-58-ga151 From 09951d83fc58a6f772de09c08e370f6d9970dbb6 Mon Sep 17 00:00:00 2001 From: Romain 
Perier Date: Tue, 9 Aug 2016 11:03:18 +0200 Subject: crypto: marvell - Update transformation context for each dequeued req So far, a sub-part of mv_cesa_int() was responsible for dequeuing complete requests, calling the 'cleanup' operation on these reqs, and then calling the crypto API callback 'complete'. The problem is that the transformation context 'ctx' is retrieved only once, before the while loop. This means that the wrong 'cleanup' operation might be called on the wrong type of cesa request, which can lead to memory corruption with this message: marvell-cesa f1090000.crypto: dma_pool_free cesa_padding, 5a5a5a5a/5a5a5a5a (bad dma) This commit fixes the issue by updating the transformation context for each dequeued cesa request. Fixes: commit 85030c5168f1 ("crypto: marvell - Add support for chai...") Signed-off-by: Romain Perier Signed-off-by: Herbert Xu --- drivers/crypto/marvell/cesa.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index d64af8625d7e..37dadb2a4feb 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv) if (!req) break; + ctx = crypto_tfm_ctx(req->tfm); mv_cesa_complete_req(ctx, req, 0); } } -- cgit v1.2.3-58-ga151 From 57cfda1ac74e58767f6305bd1ea3449177425460 Mon Sep 17 00:00:00 2001 From: Romain Perier Date: Tue, 9 Aug 2016 11:03:19 +0200 Subject: crypto: marvell - Don't overwrite default creq->state during initialization Currently, in mv_cesa_{md5,sha1,sha256}_init, creq->state is initialized before the call to mv_cesa_ahash_init. This is wrong because that function zero-fills creq with memset(), so the 'state' field holding the default digest is overwritten. This commit fixes the issue by initializing creq->state just after the call to mv_cesa_ahash_init.
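The underlying pitfall is generic; here is a small standalone C illustration (the struct is a made-up stand-in for the request context) of why the zeroing memset() must run before the default digest is loaded:

#include <stdint.h>
#include <string.h>

/* Made-up stand-in for the ahash request context. */
struct req_sketch {
	uint32_t state[4];
};

static void common_init(struct req_sketch *creq)
{
	memset(creq, 0, sizeof(*creq));	/* wipes state[] along with the rest */
}

static void md5_init_buggy(struct req_sketch *creq)
{
	creq->state[0] = 0x67452301;	/* MD5_H0 */
	common_init(creq);		/* bug: default digest zeroed again */
}

static void md5_init_fixed(struct req_sketch *creq)
{
	common_init(creq);		/* zero everything first... */
	creq->state[0] = 0x67452301;	/* ...then load the default digest */
}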
Fixes: commit b0ef51067cb4 ("crypto: marvell/cesa - initialize hash...") Signed-off-by: Romain Perier Signed-off-by: Herbert Xu --- drivers/crypto/marvell/hash.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index cf8063d8c488..44a8abeba295 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -800,13 +800,14 @@ static int mv_cesa_md5_init(struct ahash_request *req) struct mv_cesa_op_ctx tmpl = { }; mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); + + mv_cesa_ahash_init(req, &tmpl, true); + creq->state[0] = MD5_H0; creq->state[1] = MD5_H1; creq->state[2] = MD5_H2; creq->state[3] = MD5_H3; - mv_cesa_ahash_init(req, &tmpl, true); - return 0; } @@ -868,14 +869,15 @@ static int mv_cesa_sha1_init(struct ahash_request *req) struct mv_cesa_op_ctx tmpl = { }; mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); + + mv_cesa_ahash_init(req, &tmpl, false); + creq->state[0] = SHA1_H0; creq->state[1] = SHA1_H1; creq->state[2] = SHA1_H2; creq->state[3] = SHA1_H3; creq->state[4] = SHA1_H4; - mv_cesa_ahash_init(req, &tmpl, false); - return 0; } @@ -937,6 +939,9 @@ static int mv_cesa_sha256_init(struct ahash_request *req) struct mv_cesa_op_ctx tmpl = { }; mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); + + mv_cesa_ahash_init(req, &tmpl, false); + creq->state[0] = SHA256_H0; creq->state[1] = SHA256_H1; creq->state[2] = SHA256_H2; @@ -946,8 +951,6 @@ static int mv_cesa_sha256_init(struct ahash_request *req) creq->state[6] = SHA256_H6; creq->state[7] = SHA256_H7; - mv_cesa_ahash_init(req, &tmpl, false); - return 0; } -- cgit v1.2.3-58-ga151 From 47856204146ba6fd0f198dbb23c4ed7ad1c3fd99 Mon Sep 17 00:00:00 2001 From: Romain Perier Date: Tue, 9 Aug 2016 11:03:20 +0200 Subject: crypto: marvell - Don't hardcode block size in mv_cesa_ahash_cache_req Don't use 64 'as is' as the max block size in mv_cesa_ahash_cache_req. Use CESA_MAX_HASH_BLOCK_SIZE instead; this is better for readability. Signed-off-by: Romain Perier Signed-off-by: Herbert Xu --- drivers/crypto/marvell/hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 44a8abeba295..9f284682c091 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -408,7 +408,7 @@ static bool mv_cesa_ahash_cache_req(struct ahash_request *req) struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); bool cached = false; - if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) { + if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) { cached = true; if (!req->nbytes) -- cgit v1.2.3-58-ga151 From ea1a67414318822b34255c284fa1caa084cbd604 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 10 Aug 2016 11:45:29 +0200 Subject: crypto: sun4i-ss - fix a few signed warnings The variable i is always checked against unsigned values and cannot be negative. This patch makes it unsigned.
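For reference, a tiny userspace example of the warning class being silenced here; compiling with -Wsign-compare flags the first loop, and making the index unsigned, as the patch does, fixes it:

#include <stdio.h>

int main(void)
{
	unsigned int limit = 4;
	int i;
	unsigned int j;

	/* warning: comparison between signed and unsigned expressions */
	for (i = 0; i < limit; i++)
		printf("%d\n", i);

	/* i can never be negative here, so an unsigned index is the right type */
	for (j = 0; j < limit; j++)
		printf("%u\n", j);

	return 0;
}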
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 3830d7c4e138..90efd10d57a1 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -29,7 +29,8 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) u32 tx_cnt = 0; u32 spaces; u32 v; - int i, err = 0; + int err = 0; + unsigned int i; unsigned int ileft = areq->nbytes; unsigned int oleft = areq->nbytes; unsigned int todo; @@ -139,7 +140,8 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) u32 tx_cnt = 0; u32 v; u32 spaces; - int i, err = 0; + int err = 0; + unsigned int i; unsigned int ileft = areq->nbytes; unsigned int oleft = areq->nbytes; unsigned int todo; -- cgit v1.2.3-58-ga151 From 477d9b2e591b8da6901b7c64cc2f08ce7499cfb6 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 10 Aug 2016 11:45:30 +0200 Subject: crypto: sun4i-ss - unify update/final function The update and final functions have lots of code in common. This patch merges them into one function. This will give some improvements: - it will permit asynchronous support more easily - it will permit using the finup/digest functions, with some performance improvements Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 147 ++++++++++++++++++-------------- drivers/crypto/sunxi-ss/sun4i-ss.h | 1 + 2 files changed, 85 insertions(+), 63 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index ff8031498809..2fb068423341 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -129,6 +129,9 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) return 0; } +#define SS_HASH_UPDATE 1 +#define SS_HASH_FINAL 2 + /* * sun4i_hash_update: update hash engine * * Could be used for both update and final * write remaining data in op->buf * final state op->len=56 */ -int sun4i_hash_update(struct ahash_request *areq) +int sun4i_hash(struct ahash_request *areq) { u32 v, ivmode = 0; unsigned int i = 0; @@ -180,22 +183,30 @@ int sun4i_hash_update(struct ahash_request *areq) u32 spaces, rx_cnt = SS_RX_DEFAULT; size_t copied = 0; struct sg_mapping_iter mi; + unsigned int j = 0; + int zeros; + unsigned int index, padlen; + __be64 bits; + u32 bf[32]; + u32 wb = 0; + unsigned int nwait, nbw = 0; + struct scatterlist *in_sg = areq->src; dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", __func__, crypto_tfm_alg_name(areq->base.tfm), op->byte_count, areq->nbytes, op->mode, op->len, op->hash[0]); - if (areq->nbytes == 0) + if (unlikely(areq->nbytes == 0) && (op->flags & SS_HASH_FINAL) == 0) return 0; /* protect against overflow */ - if (areq->nbytes > UINT_MAX - op->len) { + if (unlikely(areq->nbytes > UINT_MAX - op->len)) { dev_err(ss->dev, "Cannot process too large request\n"); return -EINVAL; } - if (op->len + areq->nbytes < 64) { + if (op->len + areq->nbytes < 64 && (op->flags & SS_HASH_FINAL) == 0) { /* linearize data to op->buf */ copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), op->buf + op->len, areq->nbytes, 0); @@ -203,14 +214,6 @@ int sun4i_hash_update(struct ahash_request *areq) return 0; } - end = ((areq->nbytes + op->len) /
64) * 64 - op->len; - - if (end > areq->nbytes || areq->nbytes - end > 63) { - dev_err(ss->dev, "ERROR: Bound error %u %u\n", - end, areq->nbytes); - return -EINVAL; - } - spin_lock_bh(&ss->slock); /* @@ -225,6 +228,33 @@ int sun4i_hash_update(struct ahash_request *areq) /* Enable the device */ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); + if ((op->flags & SS_HASH_UPDATE) == 0) + goto hash_final; + + /* start of handling data */ + if ((op->flags & SS_HASH_FINAL) == 0) { + end = ((areq->nbytes + op->len) / 64) * 64 - op->len; + + if (end > areq->nbytes || areq->nbytes - end > 63) { + dev_err(ss->dev, "ERROR: Bound error %u %u\n", + end, areq->nbytes); + return -EINVAL; + } + } else { + /* Since we have the flag final, we can go up to modulo 4 */ + end = ((areq->nbytes + op->len) / 4) * 4 - op->len; + } + + /* TODO if SGlen % 4 and op->len == 0 then DMA */ + i = 1; + while (in_sg && i == 1) { + if ((in_sg->length % 4) != 0) + i = 0; + in_sg = sg_next(in_sg); + } + if (i == 1 && op->len == 0) + dev_dbg(ss->dev, "We can DMA\n"); + i = 0; sg_miter_start(&mi, areq->src, sg_nents(areq->src), SG_MITER_FROM_SG | SG_MITER_ATOMIC); @@ -285,7 +315,11 @@ int sun4i_hash_update(struct ahash_request *areq) } } } while (i < end); - /* final linear */ + + /* + * Now we have written to the device all that we can, + * store the remaining bytes in op->buf + */ if ((areq->nbytes - i) < 64) { while (i < areq->nbytes && in_i < mi.length && op->len < 64) { /* how many bytes we can read from current SG */ @@ -304,13 +338,21 @@ int sun4i_hash_update(struct ahash_request *areq) sg_miter_stop(&mi); + /* + * End of data process + * Now if we have the flag final go to finalize part + * If not, store the partial hash + */ + if ((op->flags & SS_HASH_FINAL) > 0) + goto hash_final; + writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); i = 0; do { v = readl(ss->base + SS_CTL); i++; } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); - if (i >= SS_TIMEOUT) { + if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", i, SS_TIMEOUT, v, areq->nbytes); @@ -318,56 +360,24 @@ int sun4i_hash_update(struct ahash_request *areq) goto release_ss; } - /* get the partial hash only if something was written */ for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) op->hash[i] = readl(ss->base + SS_MD0 + i * 4); -release_ss: - writel(0, ss->base + SS_CTL); - spin_unlock_bh(&ss->slock); - return err; -} + goto release_ss; /* - * sun4i_hash_final: finalize hashing operation + * hash_final: finalize hashing operation * * If we have some remaining bytes, we write them. * Then ask the SS for finalizing the hashing operation * * I do not check RX FIFO size in this function since the size is 32 * after each enabling and this function neither write more than 32 words. + * If we come from the update part, we cannot have more than + * 3 remainings bytes to write and SS is fast enought to not care about it. 
*/ -int sun4i_hash_final(struct ahash_request *areq) -{ - u32 v, ivmode = 0; - unsigned int i; - unsigned int j = 0; - int zeros, err = 0; - unsigned int index, padlen; - __be64 bits; - struct sun4i_req_ctx *op = ahash_request_ctx(areq); - struct sun4i_ss_ctx *ss = op->ss; - struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); - u32 bf[32]; - u32 wb = 0; - unsigned int nwait, nbw = 0; - dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x", - __func__, op->byte_count, areq->nbytes, op->mode, - op->len, op->hash[0]); - - spin_lock_bh(&ss->slock); - - /* - * if we have already written something, - * restore the partial hash state - */ - if (op->byte_count > 0) { - ivmode = SS_IV_ARBITRARY; - for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) - writel(op->hash[i], ss->base + SS_IV0 + i * 4); - } - writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); +hash_final: /* write the remaining words of the wait buffer */ if (op->len > 0) { @@ -436,7 +446,7 @@ int sun4i_hash_final(struct ahash_request *areq) v = readl(ss->base + SS_CTL); i++; } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); - if (i >= SS_TIMEOUT) { + if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", i, SS_TIMEOUT, v, areq->nbytes); @@ -463,30 +473,41 @@ release_ss: return err; } +int sun4i_hash_final(struct ahash_request *areq) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + + op->flags = SS_HASH_FINAL; + return sun4i_hash(areq); +} + +int sun4i_hash_update(struct ahash_request *areq) +{ + struct sun4i_req_ctx *op = ahash_request_ctx(areq); + + op->flags = SS_HASH_UPDATE; + return sun4i_hash(areq); +} + /* sun4i_hash_finup: finalize hashing operation after an update */ int sun4i_hash_finup(struct ahash_request *areq) { - int err; - - err = sun4i_hash_update(areq); - if (err != 0) - return err; + struct sun4i_req_ctx *op = ahash_request_ctx(areq); - return sun4i_hash_final(areq); + op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; + return sun4i_hash(areq); } /* combo of init/update/final functions */ int sun4i_hash_digest(struct ahash_request *areq) { int err; + struct sun4i_req_ctx *op = ahash_request_ctx(areq); err = sun4i_hash_init(areq); if (err != 0) return err; - err = sun4i_hash_update(areq); - if (err != 0) - return err; - - return sun4i_hash_final(areq); + op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; + return sun4i_hash(areq); } diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index 8e9c05f6e4d4..ece5a1ca67de 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -164,6 +164,7 @@ struct sun4i_req_ctx { char buf[64]; unsigned int len; struct sun4i_ss_ctx *ss; + int flags; }; int sun4i_hash_crainit(struct crypto_tfm *tfm); -- cgit v1.2.3-58-ga151 From d98a949bb852c7bc87ffc87c883a8c02e0ee0d5b Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 10 Aug 2016 11:45:31 +0200 Subject: crypto: sun4i-ss - clean unused ss The ss variable is never used, remove it. 
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 2fb068423341..7841d73c54c5 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -32,12 +32,10 @@ int sun4i_hash_init(struct ahash_request *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun4i_ss_alg_template *algt; - struct sun4i_ss_ctx *ss; memset(op, 0, sizeof(struct sun4i_req_ctx)); algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); - ss = algt->ss; op->ss = algt->ss; op->mode = algt->mode; -- cgit v1.2.3-58-ga151 From 933e7e3ab239fe7fad5d64f3e2828618b3568e8c Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 10 Aug 2016 11:45:32 +0200 Subject: crypto: sun4i-ss - fix spelling Two words are badly spelled; this patch respells them. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 7841d73c54c5..60031e0eeb25 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -372,7 +372,7 @@ int sun4i_hash(struct ahash_request *areq) * I do not check RX FIFO size in this function since the size is 32 * after each enabling and this function neither write more than 32 words. * If we come from the update part, we cannot have more than - * 3 remainings bytes to write and SS is fast enought to not care about it. + * 3 remaining bytes to write and SS is fast enough to not care about it. */ hash_final: @@ -436,7 +436,7 @@ hash_final: /* * Wait for SS to finish the hash. - * The timeout could happen only in case of bad overcloking + * The timeout could happen only in case of bad overclocking * or driver bug. */ i = 0; -- cgit v1.2.3-58-ga151 From 9b17e5ad7831cde78314f96445cfb4a9945c6c68 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 10 Aug 2016 11:45:33 +0200 Subject: crypto: sun4i-ss - Always use sun4i_tfm_ctx for storing pointer to dev ss The dev *ss is stored both in sun4i_tfm_ctx and sun4i_req_ctx. Since this pointer will never change during the tfm's lifetime, it is better to remove it from sun4i_req_ctx.
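Put differently: the tfm context is set up once in crainit and lives as long as the transform, while the request context is re-zeroed for every request, so a never-changing device pointer belongs in the former. A sketch of the split and of the lookup path used in the diff below (struct names carrying a _sketch suffix are illustrative stand-ins):

struct sun4i_tfm_ctx_sketch {
	struct sun4i_ss_ctx *ss;	/* set once in crainit, never changes */
};

struct sun4i_req_ctx_sketch {
	u32 mode;			/* per-request state only */
	/* no ss pointer here any more */
};

static struct sun4i_ss_ctx *request_to_ss(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun4i_tfm_ctx_sketch *tfmctx = crypto_ahash_ctx(tfm);

	return tfmctx->ss;	/* stable for the whole tfm lifetime */
}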
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 13 +++++++++++-- drivers/crypto/sunxi-ss/sun4i-ss.h | 1 - 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 60031e0eeb25..2ee3b59d5ddf 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -20,6 +20,15 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm) { + struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct sun4i_ss_alg_template *algt; + + memset(op, 0, sizeof(struct sun4i_tfm_ctx)); + + algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); + op->ss = algt->ss; + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct sun4i_req_ctx)); return 0; @@ -36,7 +45,6 @@ int sun4i_hash_init(struct ahash_request *areq) memset(op, 0, sizeof(struct sun4i_req_ctx)); algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); - op->ss = algt->ss; op->mode = algt->mode; return 0; @@ -168,8 +176,9 @@ int sun4i_hash(struct ahash_request *areq) */ struct sun4i_req_ctx *op = ahash_request_ctx(areq); - struct sun4i_ss_ctx *ss = op->ss; struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); + struct sun4i_ss_ctx *ss = tfmctx->ss; unsigned int in_i = 0; /* advancement in the current SG */ unsigned int end; /* diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index ece5a1ca67de..f04c0f8cf026 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -163,7 +163,6 @@ struct sun4i_req_ctx { u32 hash[5]; /* for storing SS_IVx register */ char buf[64]; unsigned int len; - struct sun4i_ss_ctx *ss; int flags; }; -- cgit v1.2.3-58-ga151 From b6ff2fdd429cffe83b576f2e1eed33a4b0f1e4fe Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 10 Aug 2016 11:45:34 +0200 Subject: crypto: sun4i-ss - fix indentation of two crypto algs Two crypto algs are badly indented; this patch fixes this style issue.
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-core.c | 68 ++++++++++++++++----------------- 1 file changed, 34 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 107cd2a41cae..3ac6c6c4ad18 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -172,45 +172,45 @@ static struct sun4i_ss_alg_template ss_algs[] = { }, { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "cbc-des3-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_req_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_u.ablkcipher = { - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = sun4i_ss_des3_setkey, - .encrypt = sun4i_ss_cbc_des3_encrypt, - .decrypt = sun4i_ss_cbc_des3_decrypt, + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = sun4i_ss_cipher_init, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_cbc_des3_encrypt, + .decrypt = sun4i_ss_cbc_des3_decrypt, } } }, { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "ecb-des3-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_req_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_u.ablkcipher = { - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = sun4i_ss_des3_setkey, - .encrypt = sun4i_ss_ecb_des3_encrypt, - .decrypt = sun4i_ss_ecb_des3_decrypt, + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_type = &crypto_ablkcipher_type, + .cra_init = sun4i_ss_cipher_init, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_ecb_des3_encrypt, + .decrypt = sun4i_ss_ecb_des3_decrypt, } } }, -- cgit v1.2.3-58-ga151 From dabc7904a74c47ead9d40cc00d5e8b1946a0736c Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 12 Aug 2016 00:00:09 +0000 Subject: crypto: ccp - Fix non static symbol warning Fixes the following sparse warning: drivers/crypto/ccp/ccp-dev.c:62:14: warning: symbol 'ccp_increment_unit_ordinal' was not declared. Should it be static? 
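The sparse rule in miniature (demo_next_ordinal is an invented analogue): a symbol referenced only from its own file should have internal linkage, so no header declaration is expected anywhere:

/* used only inside this file: give it internal linkage, otherwise
 * sparse asks "was not declared. Should it be static?" */
static unsigned int demo_next_ordinal(void)
{
	static unsigned int ordinal;	/* sketch; the driver uses atomic_t */

	return ++ordinal;
}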
Signed-off-by: Wei Yongjun Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 38a98d879f82..5d36eeff6d26 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -59,7 +59,7 @@ static struct ccp_device *ccp_rr; /* Ever-increasing value to produce unique unit numbers */ static atomic_t ccp_unit_ordinal; -unsigned int ccp_increment_unit_ordinal(void) +static unsigned int ccp_increment_unit_ordinal(void) { return atomic_inc_return(&ccp_unit_ordinal); } -- cgit v1.2.3-58-ga151 From c5f91cde6ba3e16772bca3885a3d51db73ed6a97 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Tue, 16 Aug 2016 07:51:21 +0200 Subject: hwrng: pic32 - Delete unnecessary assignment for the field "owner" The field "owner" is set by the core. Thus delete an unneeded initialisation. Generated by: scripts/coccinelle/api/platform_no_drv_owner.cocci Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/char/hw_random/pic32-rng.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index 108897bea2d0..11dc9b7c09ce 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -143,7 +143,6 @@ static struct platform_driver pic32_rng_driver = { .remove = pic32_rng_remove, .driver = { .name = "pic32-rng", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(pic32_rng_of_match), }, }; -- cgit v1.2.3-58-ga151 From b46b9d1aadf7f31eb5c0bab20cd61e71011156cf Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 20 Aug 2016 10:48:53 +0000 Subject: crypto: sun4i-ss - fix missing unlock on error in sun4i_hash() Add the missing unlock before return from function sun4i_hash() in the error handling case. Fixes: 477d9b2e591b ("crypto: sun4i-ss - unify update/final function") Signed-off-by: Wei Yongjun Acked-by: Corentin LABBE Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 2ee3b59d5ddf..1afeb8e5f709 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -245,7 +245,8 @@ int sun4i_hash(struct ahash_request *areq) if (end > areq->nbytes || areq->nbytes - end > 63) { dev_err(ss->dev, "ERROR: Bound error %u %u\n", end, areq->nbytes); - return -EINVAL; + err = -EINVAL; + goto release_ss; } } else { /* Since we have the flag final, we can go up to modulo 4 */ -- cgit v1.2.3-58-ga151 From 161040fc1c2a4a91765c18ec260589475cb791d9 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Sat, 20 Aug 2016 21:31:06 +0530 Subject: hwrng: omap3-rom - Remove obsoleted functions Remove omap3_rom_rng_data_present method as it was returning 1 always. Use .read callback instead of .data_read callback. This avoids use of obsolete callbacks. This patch is not tested with hardware as I don't have access to it. 
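For context, a hedged sketch of the modern hwrng contract being adopted (all demo_* names are invented): a single .read() callback fills the caller's buffer and returns the number of bytes produced, or a negative errno, replacing the obsolete .data_present()/.data_read() pair:

#include <linux/hw_random.h>
#include <linux/string.h>

static u32 demo_hw_pull_word(void)
{
	return 0x12345678;	/* stand-in for a real entropy register read */
}

static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u32 word = demo_hw_pull_word();

	if (max < sizeof(word))
		return 0;

	memcpy(data, &word, sizeof(word));
	return sizeof(word);	/* bytes produced, not a status code */
}

static struct hwrng demo_rng_ops = {
	.name = "demo",
	.read = demo_rng_read,
};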
Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap3-rom-rng.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 8da14f1a1f56..37a58d78aab3 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -71,12 +71,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) return 0; } -static int omap3_rom_rng_data_present(struct hwrng *rng, int wait) -{ - return 1; -} - -static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) +static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w) { int r; @@ -88,8 +83,7 @@ static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) static struct hwrng omap3_rom_rng_ops = { .name = "omap3-rom", - .data_present = omap3_rom_rng_data_present, - .data_read = omap3_rom_rng_data_read, + .read = omap3_rom_rng_read, }; static int omap3_rom_rng_probe(struct platform_device *pdev) -- cgit v1.2.3-58-ga151 From 59df87c3498a34283baf185ab0396f4742acdee5 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 21 Aug 2016 23:37:27 -0300 Subject: crypto: mxc-scc - check clk_prepare_enable() error clk_prepare_enable() may fail, so we had better check its return value and propagate it in the case of failure. Signed-off-by: Fabio Estevam Signed-off-by: Herbert Xu --- drivers/crypto/mxc-scc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c index ff383ef83871..ee4be1b0d30b 100644 --- a/drivers/crypto/mxc-scc.c +++ b/drivers/crypto/mxc-scc.c @@ -668,7 +668,9 @@ static int mxc_scc_probe(struct platform_device *pdev) return PTR_ERR(scc->clk); } - clk_prepare_enable(scc->clk); + ret = clk_prepare_enable(scc->clk); + if (ret) + return ret; /* clear error status register */ writel(0x0, scc->base + SCC_SCM_ERROR_STATUS); -- cgit v1.2.3-58-ga151 From 16d56963e832339fc2dff6a4afce14138fb61932 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Tue, 23 Aug 2016 20:28:54 +0530 Subject: crypto: rockchip - use devm_add_action_or_reset() If devm_add_action() fails, we are explicitly calling the cleanup function to free the allocated resources. Let's use the helper devm_add_action_or_reset() and return directly in case of error, as we know that the cleanup function has already been called by the helper if there was any error.
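A sketch of the two patterns side by side, following the shape of the patch below:

    /* open-coded: on failure the caller must run the cleanup itself */
    err = devm_add_action(dev, rk_crypto_action, crypto_info);
    if (err) {
            reset_control_assert(crypto_info->rst);
            goto err_crypto;
    }

    /* helper: devm_add_action_or_reset() already invokes
     * rk_crypto_action(crypto_info) when registration fails */
    err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
    if (err)
            goto err_crypto;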
Signed-off-by: Sudip Mukherjee Reviewed-by: Heiko Stuebner Signed-off-by: Herbert Xu --- drivers/crypto/rockchip/rk3288_crypto.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index af508258d2ea..d0f80c6241f9 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -304,11 +304,9 @@ static int rk_crypto_probe(struct platform_device *pdev) usleep_range(10, 20); reset_control_deassert(crypto_info->rst); - err = devm_add_action(dev, rk_crypto_action, crypto_info); - if (err) { - reset_control_assert(crypto_info->rst); + err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info); + if (err) goto err_crypto; - } spin_lock_init(&crypto_info->lock); -- cgit v1.2.3-58-ga151 From 21b5b8eebbae427d7d890b7dd1e43a63aca7c26c Mon Sep 17 00:00:00 2001 From: Ananth Jasty Date: Tue, 23 Aug 2016 16:27:14 -0700 Subject: PCI: quirk fixup for cavium invalid sriov link value. Cavium cn88xx hardware presents an incorrect SR-IOV Function Dependency Link; add a fixup quirk for the affected devices. Acked-by: David Daney Signed-off-by: Ananth Jasty Signed-off-by: Omer Khaliq Acked-by: Bjorn Helgaas Signed-off-by: Herbert Xu --- drivers/pci/quirks.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 37ff0158e45f..5980aae41d40 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -834,6 +834,17 @@ static void quirk_amd_ioapic(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); #endif /* CONFIG_X86_IO_APIC */ +#ifdef CONFIG_ARM64 + +static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev) +{ + /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */ + if (dev->subsystem_device == 0xa118) + dev->sriov->link = dev->devfn; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link); +#endif + /* * Some settings of MMRBC can lead to data corruption so block changes. * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide -- cgit v1.2.3-58-ga151 From cc2f1908c6b8f6257b7da9124ce6d859c13cba3b Mon Sep 17 00:00:00 2001 From: Omer Khaliq Date: Tue, 23 Aug 2016 16:27:15 -0700 Subject: hwrng: cavium - Add Cavium HWRNG driver for ThunderX SoC. The Cavium ThunderX SoC has a hardware random number generator. This driver provides support using the HWRNG framework. Signed-off-by: Omer Khaliq Signed-off-by: Ananth Jasty Acked-by: David Daney Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 13 +++++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/cavium-rng-vf.c | 99 ++++++++++++++++++++++++++++++++++ drivers/char/hw_random/cavium-rng.c | 94 ++++++++++++++++++++++++++++++++ 4 files changed, 207 insertions(+) create mode 100644 drivers/char/hw_random/cavium-rng-vf.c create mode 100644 drivers/char/hw_random/cavium-rng.c (limited to 'drivers') diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 56ad5a5936a9..fb9c7adfdb3a 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -410,6 +410,19 @@ config HW_RANDOM_MESON If unsure, say Y.
+config HW_RANDOM_CAVIUM + tristate "Cavium ThunderX Random Number Generator support" + depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT)) + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on Cavium SoCs. + + To compile this driver as a module, choose M here: the + module will be called cavium_rng. + + If unsure, say Y. + endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 04bb0b03356f..5f52b1e4e7be 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -35,3 +35,4 @@ obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o +obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c new file mode 100644 index 000000000000..066ae0e78d63 --- /dev/null +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -0,0 +1,99 @@ +/* + * Hardware Random Number Generator support for Cavium, Inc. + * Thunder processor family. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2016 Cavium, Inc. + */ + +#include +#include +#include +#include +#include + +struct cavium_rng { + struct hwrng ops; + void __iomem *result; +}; + +/* Read data from the RNG unit */ +static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) +{ + struct cavium_rng *p = container_of(rng, struct cavium_rng, ops); + unsigned int size = max; + + while (size >= 8) { + *((u64 *)dat) = readq(p->result); + size -= 8; + dat += 8; + } + while (size > 0) { + *((u8 *)dat) = readb(p->result); + size--; + dat++; + } + return max; +} + +/* Map Cavium RNG to an HWRNG object */ +static int cavium_rng_probe_vf(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct cavium_rng *rng; + int ret; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng) + return -ENOMEM; + + /* Map the RNG result */ + rng->result = pcim_iomap(pdev, 0, 0); + if (!rng->result) { + dev_err(&pdev->dev, "Error iomap failed retrieving result.\n"); + return -ENOMEM; + } + + rng->ops.name = "cavium rng"; + rng->ops.read = cavium_rng_read; + rng->ops.quality = 1000; + + pci_set_drvdata(pdev, rng); + + ret = hwrng_register(&rng->ops); + if (ret) { + dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); + return ret; + } + + return 0; +} + +/* Remove the VF */ +void cavium_rng_remove_vf(struct pci_dev *pdev) +{ + struct cavium_rng *rng; + + rng = pci_get_drvdata(pdev); + hwrng_unregister(&rng->ops); +} + +static const struct pci_device_id cavium_rng_vf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, + {0,}, +}; +MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table); + +static struct pci_driver cavium_rng_vf_driver = { + .name = "cavium_rng_vf", + .id_table = cavium_rng_vf_id_table, + .probe = cavium_rng_probe_vf, + .remove = cavium_rng_remove_vf, +}; +module_pci_driver(cavium_rng_vf_driver); + +MODULE_AUTHOR("Omer Khaliq "); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c new file mode 100644 index 000000000000..a944e0a47f42 --- /dev/null +++ b/drivers/char/hw_random/cavium-rng.c @@ -0,0 +1,94 @@ +/* + * 
Hardware Random Number Generator support for Cavium Inc. + * Thunder processor family. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2016 Cavium, Inc. + */ + +#include +#include +#include +#include +#include + +#define THUNDERX_RNM_ENT_EN 0x1 +#define THUNDERX_RNM_RNG_EN 0x2 + +struct cavium_rng_pf { + void __iomem *control_status; +}; + +/* Enable the RNG hardware and activate the VF */ +static int cavium_rng_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct cavium_rng_pf *rng; + int iov_err; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng) + return -ENOMEM; + + /*Map the RNG control */ + rng->control_status = pcim_iomap(pdev, 0, 0); + if (!rng->control_status) { + dev_err(&pdev->dev, + "Error iomap failed retrieving control_status.\n"); + return -ENOMEM; + } + + /* Enable the RNG hardware and entropy source */ + writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN, + rng->control_status); + + pci_set_drvdata(pdev, rng); + + /* Enable the Cavium RNG as a VF */ + iov_err = pci_enable_sriov(pdev, 1); + if (iov_err != 0) { + /* Disable the RNG hardware and entropy source */ + writeq(0, rng->control_status); + dev_err(&pdev->dev, + "Error initializing RNG virtual function,(%i).\n", + iov_err); + return iov_err; + } + + return 0; +} + +/* Disable VF and RNG Hardware */ +void cavium_rng_remove(struct pci_dev *pdev) +{ + struct cavium_rng_pf *rng; + + rng = pci_get_drvdata(pdev); + + /* Remove the VF */ + pci_disable_sriov(pdev); + + /* Disable the RNG hardware and entropy source */ + writeq(0, rng->control_status); +} + +static const struct pci_device_id cavium_rng_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */ + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table); + +static struct pci_driver cavium_rng_pf_driver = { + .name = "cavium_rng_pf", + .id_table = cavium_rng_pf_id_table, + .probe = cavium_rng_probe, + .remove = cavium_rng_remove, +}; + +module_pci_driver(cavium_rng_pf_driver); +MODULE_AUTHOR("Omer Khaliq "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3-58-ga151 From 1ac6b731b81cbbd2b6da389949f65f2d1e453d69 Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Fri, 26 Aug 2016 17:56:24 +0800 Subject: crypto: caam - add missing header dependencies We get 1 warning when building the kernel with W=1: drivers/crypto/caam/ctrl.c:398:5: warning: no previous prototype for 'caam_get_era' [-Wmissing-prototypes] In fact, this function is declared in drivers/crypto/caam/ctrl.h, so this patch adds the missing header dependencies. Signed-off-by: Baoyou Xie Signed-off-by: Herbert Xu --- drivers/crypto/caam/ctrl.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index f4c044f5bcb2..72ff19658985 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -14,6 +14,7 @@ #include "jr.h" #include "desc_constr.h" #include "error.h" +#include "ctrl.h" bool caam_little_end; EXPORT_SYMBOL(caam_little_end); -- cgit v1.2.3-58-ga151 From 6020721320c67331b1c4602adfbb8fc7398986ab Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:29 +0200 Subject: hwrng: amd - Fix style problem with blank line Some blank lines are unnecessary, and one is missing after a declaration. This patch fixes those style problems.
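The convention involved, as a generic sketch (do_work() is a placeholder, not this driver's code): checkpatch wants a blank line after a local variable declaration block, and flags stray blank lines elsewhere as unnecessary:

    static int example(void)
    {
            int ret;                /* declaration block ... */

            ret = do_work();        /* ... blank line before the statements */
            return ret;
    }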
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 48f6a83cdd61..45b79654e071 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -31,10 +31,8 @@ #include #include - #define PFX KBUILD_MODNAME ": " - /* * Data for PCI driver interface * @@ -52,7 +50,6 @@ MODULE_DEVICE_TABLE(pci, pci_tbl); static struct pci_dev *amd_pdev; - static int amd_rng_data_present(struct hwrng *rng, int wait) { u32 pmbase = (u32)rng->priv; @@ -100,7 +97,6 @@ static void amd_rng_cleanup(struct hwrng *rng) pci_write_config_byte(amd_pdev, 0x40, rnen); } - static struct hwrng amd_rng = { .name = "amd", .init = amd_rng_init, @@ -109,7 +105,6 @@ static struct hwrng amd_rng = { .data_read = amd_rng_data_read, }; - static int __init mod_init(void) { int err = -ENODEV; @@ -157,6 +152,7 @@ out: static void __exit mod_exit(void) { u32 pmbase = (unsigned long)amd_rng.priv; + release_region(pmbase + 0xF0, 8); hwrng_unregister(&amd_rng); } -- cgit v1.2.3-58-ga151 From 1c335d4487c8988364b68c862324fbb3567d00ee Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:30 +0200 Subject: hwrng: amd - use the BIT macro This patch adds usage of the BIT() macro. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 45b79654e071..d0042f5d857d 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -78,11 +78,11 @@ static int amd_rng_init(struct hwrng *rng) u8 rnen; pci_read_config_byte(amd_pdev, 0x40, &rnen); - rnen |= (1 << 7); /* RNG on */ + rnen |= BIT(7); /* RNG on */ pci_write_config_byte(amd_pdev, 0x40, rnen); pci_read_config_byte(amd_pdev, 0x41, &rnen); - rnen |= (1 << 7); /* PMIO enable */ + rnen |= BIT(7); /* PMIO enable */ pci_write_config_byte(amd_pdev, 0x41, rnen); return 0; @@ -93,7 +93,7 @@ static void amd_rng_cleanup(struct hwrng *rng) u8 rnen; pci_read_config_byte(amd_pdev, 0x40, &rnen); - rnen &= ~(1 << 7); /* RNG off */ + rnen &= ~BIT(7); /* RNG off */ pci_write_config_byte(amd_pdev, 0x40, rnen); } -- cgit v1.2.3-58-ga151 From f8169bfb4955ebb669b49d5d2564b250cdd7aa7b Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:31 +0200 Subject: hwrng: amd - Be consistent with the driver name The driver name is displayed differently each time. This patch makes use of the same name everywhere.
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index d0042f5d857d..93157afa51eb 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -31,7 +31,7 @@ #include #include -#define PFX KBUILD_MODNAME ": " +#define DRV_NAME "AMD768-HWRNG" /* * Data for PCI driver interface @@ -128,8 +128,8 @@ found: pmbase &= 0x0000FF00; if (pmbase == 0) goto out; - if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) { - dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n", + if (!request_region(pmbase + 0xF0, 8, DRV_NAME)) { + dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0); err = -EBUSY; goto out; @@ -137,11 +137,10 @@ found: amd_rng.priv = (unsigned long)pmbase; amd_pdev = pdev; - pr_info("AMD768 RNG detected\n"); + pr_info(DRV_NAME " detected\n"); err = hwrng_register(&amd_rng); if (err) { - pr_err(PFX "RNG registering failed (%d)\n", - err); + pr_err(DRV_NAME " registering failed (%d)\n", err); release_region(pmbase + 0xF0, 8); goto out; } -- cgit v1.2.3-58-ga151 From 055ae890388ad2b02a53d5c3e1f60f976989e34e Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:32 +0200 Subject: hwrng: amd - Remove asm/io.h checkpatch complains about asm/io.h being used instead of linux/io.h. In fact it is not needed. This patch removes it and, in the process, alphabetizes the other headers. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 93157afa51eb..de82fe3643a5 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -24,12 +24,11 @@ * warranty of any kind, whether express or implied. */ -#include +#include +#include #include +#include #include -#include -#include -#include #define DRV_NAME "AMD768-HWRNG" -- cgit v1.2.3-58-ga151 From fdec60d683568c8e79457c106fa6642ca7ae95f3 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:33 +0200 Subject: hwrng: amd - release_region must be called after hwrng_unregister The driver released the memory region before being sure that nobody was using it. This patch makes hwrng_unregister() run before any release is done. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index de82fe3643a5..383e1974234d 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -151,8 +151,9 @@ static void __exit mod_exit(void) { u32 pmbase = (unsigned long)amd_rng.priv; - release_region(pmbase + 0xF0, 8); hwrng_unregister(&amd_rng); + + release_region(pmbase + 0xF0, 8); } -- cgit v1.2.3-58-ga151 From 7bad2cc062b343575f19d5de6be3f297a1429f37 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:34 +0200 Subject: hwrng: amd - Replace global variable with private struct Instead of having two global variables, it's better to use a private struct.
This will permit removing the amd_pdev variable. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 57 ++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 383e1974234d..4ef94e9b9fe7 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -47,15 +47,18 @@ static const struct pci_device_id pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, pci_tbl); -static struct pci_dev *amd_pdev; +struct amd768_priv { + struct pci_dev *pcidev; + u32 pmbase; +}; static int amd_rng_data_present(struct hwrng *rng, int wait) { - u32 pmbase = (u32)rng->priv; + struct amd768_priv *priv = (struct amd768_priv *)rng->priv; int data, i; for (i = 0; i < 20; i++) { - data = !!(inl(pmbase + 0xF4) & 1); + data = !!(inl(priv->pmbase + 0xF4) & 1); if (data || !wait) break; udelay(10); @@ -65,35 +68,37 @@ static int amd_rng_data_present(struct hwrng *rng, int wait) static int amd_rng_data_read(struct hwrng *rng, u32 *data) { - u32 pmbase = (u32)rng->priv; + struct amd768_priv *priv = (struct amd768_priv *)rng->priv; - *data = inl(pmbase + 0xF0); + *data = inl(priv->pmbase + 0xF0); return 4; } static int amd_rng_init(struct hwrng *rng) { + struct amd768_priv *priv = (struct amd768_priv *)rng->priv; u8 rnen; - pci_read_config_byte(amd_pdev, 0x40, &rnen); + pci_read_config_byte(priv->pcidev, 0x40, &rnen); rnen |= BIT(7); /* RNG on */ - pci_write_config_byte(amd_pdev, 0x40, rnen); + pci_write_config_byte(priv->pcidev, 0x40, rnen); - pci_read_config_byte(amd_pdev, 0x41, &rnen); + pci_read_config_byte(priv->pcidev, 0x41, &rnen); rnen |= BIT(7); /* PMIO enable */ - pci_write_config_byte(amd_pdev, 0x41, rnen); + pci_write_config_byte(priv->pcidev, 0x41, rnen); return 0; } static void amd_rng_cleanup(struct hwrng *rng) { + struct amd768_priv *priv = (struct amd768_priv *)rng->priv; u8 rnen; - pci_read_config_byte(amd_pdev, 0x40, &rnen); + pci_read_config_byte(priv->pcidev, 0x40, &rnen); rnen &= ~BIT(7); /* RNG off */ - pci_write_config_byte(amd_pdev, 0x40, rnen); + pci_write_config_byte(priv->pcidev, 0x40, rnen); } static struct hwrng amd_rng = { @@ -110,6 +115,7 @@ static int __init mod_init(void) struct pci_dev *pdev = NULL; const struct pci_device_id *ent; u32 pmbase; + struct amd768_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); @@ -117,24 +123,30 @@ static int __init mod_init(void) goto found; } /* Device not found.
*/ - goto out; + return -ENODEV; found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) - goto out; - err = -EIO; + return err; + pmbase &= 0x0000FF00; if (pmbase == 0) - goto out; + return -EIO; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + if (!request_region(pmbase + 0xF0, 8, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0); err = -EBUSY; goto out; } - amd_rng.priv = (unsigned long)pmbase; - amd_pdev = pdev; + amd_rng.priv = (unsigned long)priv; + priv->pmbase = pmbase; + priv->pcidev = pdev; pr_info(DRV_NAME " detected\n"); err = hwrng_register(&amd_rng); @@ -143,17 +155,24 @@ found: release_region(pmbase + 0xF0, 8); goto out; } + return 0; + out: + kfree(priv); return err; } static void __exit mod_exit(void) { - u32 pmbase = (unsigned long)amd_rng.priv; + struct amd768_priv *priv; + + priv = (struct amd768_priv *)amd_rng.priv; hwrng_unregister(&amd_rng); - release_region(pmbase + 0xF0, 8); + release_region(priv->pmbase + 0xF0, 8); + + kfree(priv); } module_init(mod_init); -- cgit v1.2.3-58-ga151 From 3c343a37644c5fe07960c3226a463d98627607f0 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:35 +0200 Subject: hwrng: amd - Access hardware via ioread32/iowrite32 Instead of accessing the hardware directly via pmbase, it's better to access it after ioport_map() via ioread32()/iowrite32(). Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 4ef94e9b9fe7..bfa14b9a3a30 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -32,6 +32,11 @@ #define DRV_NAME "AMD768-HWRNG" +#define RNGDATA 0x00 +#define RNGDONE 0x04 +#define PMBASE_OFFSET 0xF0 +#define PMBASE_SIZE 8 + /* * Data for PCI driver interface * @@ -48,6 +53,7 @@ static const struct pci_device_id pci_tbl[] = { MODULE_DEVICE_TABLE(pci, pci_tbl); struct amd768_priv { + void __iomem *iobase; struct pci_dev *pcidev; u32 pmbase; }; @@ -58,7 +64,7 @@ static int amd_rng_data_present(struct hwrng *rng, int wait) int data, i; for (i = 0; i < 20; i++) { - data = !!(inl(priv->pmbase + 0xF4) & 1); + data = !!(ioread32(priv->iobase + RNGDONE) & 1); if (data || !wait) break; udelay(10); @@ -70,7 +76,7 @@ static int amd_rng_data_read(struct hwrng *rng, u32 *data) { struct amd768_priv *priv = (struct amd768_priv *)rng->priv; - *data = inl(priv->pmbase + 0xF0); + *data = ioread32(priv->iobase + RNGDATA); return 4; } @@ -138,12 +144,20 @@ found: if (!priv) return -ENOMEM; - if (!request_region(pmbase + 0xF0, 8, DRV_NAME)) { + if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0); err = -EBUSY; goto out; } + + priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE); + if (!priv->iobase) { + pr_err(DRV_NAME "Cannot map ioport\n"); + err = -EINVAL; + goto err_iomap; + } + amd_rng.priv = (unsigned long)priv; priv->pmbase = pmbase; priv->pcidev = pdev; @@ -152,11 +166,14 @@ found: err = hwrng_register(&amd_rng); if (err) { pr_err(DRV_NAME " registering failed (%d)\n", err); - release_region(pmbase + 0xF0, 8); - goto out; + goto err_hwrng; } return 0; +err_hwrng: + ioport_unmap(priv->iobase); +err_iomap: + release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); out: kfree(priv); return err; @@ -170,7 +187,9
@@ static void __exit mod_exit(void) hwrng_unregister(&amd_rng); - release_region(priv->pmbase + 0xF0, 8); + ioport_unmap(priv->iobase); + + release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); kfree(priv); } -- cgit v1.2.3-58-ga151 From 85962d2248fae84ffaa04f09826596d20af56819 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Fri, 26 Aug 2016 13:11:36 +0200 Subject: hwrng: amd - Convert to new hwrng read() API This patch converts the hwrng interface used by amd768-rng to the new API by replacing data_read()/data_present() with read(). Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 47 ++++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index bfa14b9a3a30..9959c762da2f 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -58,27 +58,37 @@ struct amd768_priv { u32 pmbase; }; -static int amd_rng_data_present(struct hwrng *rng, int wait) +static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { + u32 *data = buf; struct amd768_priv *priv = (struct amd768_priv *)rng->priv; - int data, i; - - for (i = 0; i < 20; i++) { - data = !!(ioread32(priv->iobase + RNGDONE) & 1); - if (data || !wait) - break; - udelay(10); + size_t read = 0; + /* We will wait at maximum one time per read */ + int timeout = max / 4 + 1; + + /* + * RNG data is available when RNGDONE is set to 1 + * New random numbers are generated approximately 128 microseconds + * after RNGDATA is read + */ + while (read < max) { + if (ioread32(priv->iobase + RNGDONE) == 0) { + if (wait) { + /* Delay given by datasheet */ + usleep_range(128, 196); + if (timeout-- == 0) + return read; + } else { + return 0; + } + } else { + *data = ioread32(priv->iobase + RNGDATA); + data++; + read += 4; + } } - return data; -} - -static int amd_rng_data_read(struct hwrng *rng, u32 *data) -{ - struct amd768_priv *priv = (struct amd768_priv *)rng->priv; - - *data = ioread32(priv->iobase + RNGDATA); - return 4; + return read; } static int amd_rng_init(struct hwrng *rng) @@ -111,8 +121,7 @@ static struct hwrng amd_rng = { .name = "amd", .init = amd_rng_init, .cleanup = amd_rng_cleanup, - .data_present = amd_rng_data_present, - .data_read = amd_rng_data_read, + .read = amd_rng_read, }; static int __init mod_init(void) -- cgit v1.2.3-58-ga151 From 68734bc9f58ee01f1b530394cc31d58e364125f6 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Sat, 27 Aug 2016 00:02:04 +0530 Subject: hwrng: Remove check for max less than 4 bytes The HW RNG core never asks for less than 4 bytes of data, so the check whether max is less than 4 bytes is unnecessary. Remove the check.
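The guarantee being relied on: the core always passes a buffer large enough for at least one word, so a driver that produces 32 bits per call can simply write one word and return its size. A sketch of the resulting callback shape (names are illustrative; read_hw_word() stands in for the driver's MMIO read):

    static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
    {
            /* no 'if (max < sizeof(u32))' guard needed any more */
            *(u32 *)buf = read_hw_word();
            return sizeof(u32);
    }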
Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/meson-rng.c | 3 --- drivers/char/hw_random/st-rng.c | 3 --- 2 files changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c index 0cfd81bcaeac..58bef39f7286 100644 --- a/drivers/char/hw_random/meson-rng.c +++ b/drivers/char/hw_random/meson-rng.c @@ -76,9 +76,6 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct meson_rng_data *data = container_of(rng, struct meson_rng_data, rng); - if (max < sizeof(u32)) - return 0; - *(u32 *)buf = readl_relaxed(data->base + RNG_DATA); return sizeof(u32); diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index 1d35363d23c5..7e8aa6b7b452 100644 --- a/drivers/char/hw_random/st-rng.c +++ b/drivers/char/hw_random/st-rng.c @@ -54,9 +54,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) u32 status; int i; - if (max < sizeof(u16)) - return -EINVAL; - /* Wait until FIFO is full - max 4uS*/ for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) { status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG); -- cgit v1.2.3-58-ga151 From 98a3c465fe987f1e0ae87a50ce980d6560233285 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Sun, 28 Aug 2016 14:19:12 +0530 Subject: hwrng: tx4939 - Use devm_hwrng_register instead of hwrng_register By using devm_hwrng_register() instead of hwrng_register(), the .remove callback in the platform_driver can be removed. This saves a few lines of code. Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/tx4939-rng.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c index a7b694913416..1093583b579c 100644 --- a/drivers/char/hw_random/tx4939-rng.c +++ b/drivers/char/hw_random/tx4939-rng.c @@ -144,22 +144,13 @@ static int __init tx4939_rng_probe(struct platform_device *dev) } platform_set_drvdata(dev, rngdev); - return hwrng_register(&rngdev->rng); -} - -static int __exit tx4939_rng_remove(struct platform_device *dev) -{ - struct tx4939_rng *rngdev = platform_get_drvdata(dev); - - hwrng_unregister(&rngdev->rng); - return 0; + return devm_hwrng_register(&dev->dev, &rngdev->rng); } static struct platform_driver tx4939_rng_driver = { .driver = { .name = "tx4939-rng", }, - .remove = tx4939_rng_remove, }; module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe); -- cgit v1.2.3-58-ga151 From 93ba73fed31d0ec96489fedaeef1a1c402f9c8e4 Mon Sep 17 00:00:00 2001 From: Maksim Lukoshkov Date: Mon, 29 Aug 2016 13:28:31 +0100 Subject: crypto: qat - fix constants table DMA Copy the const_tab array into DMA-able memory (accessible by the QAT hardware).
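The difference between the two mappings, sketched with the names used in the patch (dev standing in for &GET_DEV(accel_dev)): dma_map_single() hands an existing kernel buffer to the device and assumes it is DMA-able, while dma_zalloc_coherent() allocates zeroed memory guaranteed to be visible to the device for its whole lifetime:

    /* before: map the static table in place */
    admin->const_tbl_addr = dma_map_single(dev, (void *)const_tab, 1024,
                                           DMA_TO_DEVICE);

    /* after: allocate a coherent buffer and copy the table into it */
    admin->virt_tbl_addr = dma_zalloc_coherent(dev, PAGE_SIZE,
                                               &admin->const_tbl_addr,
                                               GFP_KERNEL);
    memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));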
Signed-off-by: Maksim Lukoshkov Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_admin.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index ce7c4626c983..3744b22f0c46 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -146,6 +146,7 @@ struct adf_admin_comms { dma_addr_t phy_addr; dma_addr_t const_tbl_addr; void *virt_addr; + void *virt_tbl_addr; void __iomem *mailbox_addr; struct mutex lock; /* protects adf_admin_comms struct */ }; @@ -251,17 +252,19 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) return -ENOMEM; } - admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev), - (void *) const_tab, 1024, - DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(&GET_DEV(accel_dev), - admin->const_tbl_addr))) { + admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), + PAGE_SIZE, + &admin->const_tbl_addr, + GFP_KERNEL); + if (!admin->virt_tbl_addr) { + dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, admin->virt_addr, admin->phy_addr); kfree(admin); return -ENOMEM; } + + memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab)); reg_val = (u64)admin->phy_addr; ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); @@ -282,9 +285,10 @@ void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) if (admin->virt_addr) dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, admin->virt_addr, admin->phy_addr); + if (admin->virt_tbl_addr) + dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, + admin->virt_tbl_addr, admin->const_tbl_addr); - dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024, - DMA_TO_DEVICE); mutex_destroy(&admin->lock); kfree(admin); accel_dev->admin = NULL; -- cgit v1.2.3-58-ga151 From da59c51cf2201b6b242bdea320fcfe7b8d331900 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 29 Aug 2016 22:40:16 +0530 Subject: hwrng: bcm2835 - handle of_iomap failures Check return value of of_iomap and handle errors correctly. Signed-off-by: Arvind Yadav Acked-by: Eric Anholt Signed-off-by: Herbert Xu --- drivers/char/hw_random/bcm2835-rng.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index af2149273fe0..574211a49549 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -92,9 +92,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev) bcm2835_rng_ops.priv = (unsigned long)rng_base; rng_id = of_match_node(bcm2835_rng_of_match, np); - if (!rng_id) + if (!rng_id) { + iounmap(rng_base); return -EINVAL; - + } /* Check for rng init function, execute it */ rng_setup = rng_id->data; if (rng_setup) -- cgit v1.2.3-58-ga151 From 1548a37da044aa123492638e61283a23cb4b475d Mon Sep 17 00:00:00 2001 From: Maksim Lukoshkov Date: Tue, 30 Aug 2016 18:56:00 +0100 Subject: crypto: qat - fix incorrect accelerator mask for C3X devices Fix incorrect value of ADF_C3XXX_ACCELERATORS_MASK. 
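The arithmetic behind the fix: a contiguous mask covering N units is (1 << N) - 1, so with ADF_C3XXX_MAX_ACCELERATORS equal to 3 the mask must be (1 << 3) - 1 = 0x7; the old value 0x3 covered only two of the three accelerators. Expressed as a sketch (the driver itself keeps the literal):

    #define ADF_C3XXX_MAX_ACCELERATORS 3
    /* (1 << 3) - 1 == 0b111 == 0x7 */
    #define ADF_C3XXX_ACCELERATORS_MASK ((1 << ADF_C3XXX_MAX_ACCELERATORS) - 1)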
Signed-off-by: Maksim Lukoshkov Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index 2f2681d3458a..afc9a0a86747 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -55,7 +55,7 @@ #define ADF_C3XXX_MAX_ACCELERATORS 3 #define ADF_C3XXX_MAX_ACCELENGINES 6 #define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 -#define ADF_C3XXX_ACCELERATORS_MASK 0x3 +#define ADF_C3XXX_ACCELERATORS_MASK 0x7 #define ADF_C3XXX_ACCELENGINES_MASK 0x3F #define ADF_C3XXX_ETR_MAX_BANKS 16 #define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) -- cgit v1.2.3-58-ga151 From 2589ad84047f1dbed741b48785680b152db2e5db Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 31 Aug 2016 14:02:57 +0200 Subject: crypto: engine - move crypto engine to its own header This patch moves the whole crypto engine API to its own header, crypto/engine.h. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 1 + drivers/crypto/omap-aes.c | 1 + drivers/crypto/omap-des.c | 1 + include/crypto/algapi.h | 70 ------------------------------ include/crypto/engine.h | 90 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 93 insertions(+), 70 deletions(-) create mode 100644 include/crypto/engine.h (limited to 'drivers') diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index a55c82dd48ef..795b6f9412ba 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -14,6 +14,7 @@ #include #include +#include #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 4ab53a604312..993e08ecd16f 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -36,6 +36,7 @@ #include #include #include +#include #define DST_MAXBURST 4 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 5691434ffb2d..dc36e1c96eba 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -39,6 +39,7 @@ #include #include #include +#include #define DST_MAXBURST 2 diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 8637cdfe382a..404e9558e879 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -15,7 +15,6 @@ #include #include #include -#include #include struct crypto_aead; @@ -129,75 +128,6 @@ struct ablkcipher_walk { unsigned int blocksize; }; -#define ENGINE_NAME_LEN 30 -/* - * struct crypto_engine - crypto hardware engine - * @name: the engine name - * @idling: the engine is entering idle state - * @busy: request pump is busy - * @running: the engine is on working - * @cur_req_prepared: current request is prepared - * @list: link with the global crypto engine list - * @queue_lock: spinlock to syncronise access to request queue - * @queue: the crypto queue of the engine - * @rt: whether this queue is set to run as a realtime task - * @prepare_crypt_hardware: a request will soon arrive from the queue - * so the subsystem requests the driver to prepare the hardware - * by issuing this call - * @unprepare_crypt_hardware: there are currently no more requests on the - * queue so the subsystem notifies the driver that it may relax the - * hardware by issuing this call - * @prepare_request: do some prepare if need before
handle the current request - * @unprepare_request: undo any work done by prepare_message() - * @crypt_one_request: do encryption for current request - * @kworker: thread struct for request pump - * @kworker_task: pointer to task for request pump kworker thread - * @pump_requests: work struct for scheduling work to the request pump - * @priv_data: the engine private data - * @cur_req: the current request which is on processing - */ -struct crypto_engine { - char name[ENGINE_NAME_LEN]; - bool idling; - bool busy; - bool running; - bool cur_req_prepared; - - struct list_head list; - spinlock_t queue_lock; - struct crypto_queue queue; - - bool rt; - - int (*prepare_crypt_hardware)(struct crypto_engine *engine); - int (*unprepare_crypt_hardware)(struct crypto_engine *engine); - - int (*prepare_request)(struct crypto_engine *engine, - struct ablkcipher_request *req); - int (*unprepare_request)(struct crypto_engine *engine, - struct ablkcipher_request *req); - int (*crypt_one_request)(struct crypto_engine *engine, - struct ablkcipher_request *req); - - struct kthread_worker kworker; - struct task_struct *kworker_task; - struct kthread_work pump_requests; - - void *priv_data; - struct ablkcipher_request *cur_req; -}; - -int crypto_transfer_request(struct crypto_engine *engine, - struct ablkcipher_request *req, bool need_pump); -int crypto_transfer_request_to_engine(struct crypto_engine *engine, - struct ablkcipher_request *req); -void crypto_finalize_request(struct crypto_engine *engine, - struct ablkcipher_request *req, int err); -int crypto_engine_start(struct crypto_engine *engine); -int crypto_engine_stop(struct crypto_engine *engine); -struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); -int crypto_engine_exit(struct crypto_engine *engine); - extern const struct crypto_type crypto_ablkcipher_type; extern const struct crypto_type crypto_blkcipher_type; diff --git a/include/crypto/engine.h b/include/crypto/engine.h new file mode 100644 index 000000000000..40899bd246ec --- /dev/null +++ b/include/crypto/engine.h @@ -0,0 +1,90 @@ +/* + * Crypto engine API + * + * Copyright (c) 2016 Baolin Wang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ +#ifndef _CRYPTO_ENGINE_H +#define _CRYPTO_ENGINE_H + +#include +#include +#include +#include +#include + +#define ENGINE_NAME_LEN 30 +/* + * struct crypto_engine - crypto hardware engine + * @name: the engine name + * @idling: the engine is entering idle state + * @busy: request pump is busy + * @running: the engine is on working + * @cur_req_prepared: current request is prepared + * @list: link with the global crypto engine list + * @queue_lock: spinlock to syncronise access to request queue + * @queue: the crypto queue of the engine + * @rt: whether this queue is set to run as a realtime task + * @prepare_crypt_hardware: a request will soon arrive from the queue + * so the subsystem requests the driver to prepare the hardware + * by issuing this call + * @unprepare_crypt_hardware: there are currently no more requests on the + * queue so the subsystem notifies the driver that it may relax the + * hardware by issuing this call + * @prepare_request: do some prepare if need before handle the current request + * @unprepare_request: undo any work done by prepare_message() + * @crypt_one_request: do encryption for current request + * @kworker: thread struct for request pump + * @kworker_task: pointer to task for request pump kworker thread + * @pump_requests: work struct for scheduling work to the request pump + * @priv_data: the engine private data + * @cur_req: the current request which is on processing + */ +struct crypto_engine { + char name[ENGINE_NAME_LEN]; + bool idling; + bool busy; + bool running; + bool cur_req_prepared; + + struct list_head list; + spinlock_t queue_lock; + struct crypto_queue queue; + + bool rt; + + int (*prepare_crypt_hardware)(struct crypto_engine *engine); + int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + + int (*prepare_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*unprepare_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*crypt_one_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + + struct kthread_worker kworker; + struct task_struct *kworker_task; + struct kthread_work pump_requests; + + void *priv_data; + struct ablkcipher_request *cur_req; +}; + +int crypto_transfer_request(struct crypto_engine *engine, + struct ablkcipher_request *req, bool need_pump); +int crypto_transfer_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req); +void crypto_finalize_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err); +int crypto_engine_start(struct crypto_engine *engine); +int crypto_engine_stop(struct crypto_engine *engine); +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +int crypto_engine_exit(struct crypto_engine *engine); + +#endif /* _CRYPTO_ENGINE_H */ -- cgit v1.2.3-58-ga151 From 4cba7cf025f35599f8de3282c8a7278ecc43eea4 Mon Sep 17 00:00:00 2001 From: Corentin LABBE Date: Wed, 31 Aug 2016 14:02:58 +0200 Subject: crypto: engine - permit to enqueue ahash_request The current crypto engine allows only ablkcipher_request to be enqueued, thus denying any use of it for hardware that also handles hash algorithms. This patch modifies the API to allow enqueueing both ciphers and hashes. Since omap-aes/omap-des are the only users, this patch also converts them to the new crypto engine API.
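With the extended API, a driver that owns both a cipher and a hash unit wires its callbacks roughly like this (a sketch based on the hooks introduced below; the cipher handlers follow the converted omap-aes driver, while the my_* hash handlers are hypothetical):

    engine = crypto_engine_alloc_init(dev, true);

    /* ablkcipher path, as in omap-aes after this patch */
    engine->prepare_cipher_request = omap_aes_prepare_req;
    engine->cipher_one_request = omap_aes_crypt_req;

    /* hash path, now possible for hardware with a hash unit */
    engine->prepare_hash_request = my_prepare_hash_req;   /* hypothetical */
    engine->hash_one_request = my_hash_one_req;           /* hypothetical */

    err = crypto_engine_start(engine);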
Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 186 ++++++++++++++++++++++++++++++++++++---------- drivers/crypto/omap-aes.c | 8 +- drivers/crypto/omap-des.c | 8 +- include/crypto/engine.h | 49 ++++++++---- 4 files changed, 189 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 795b6f9412ba..bfb92ace2c91 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -15,13 +15,11 @@ #include #include #include +#include #include "internal.h" #define CRYPTO_ENGINE_MAX_QLEN 10 -void crypto_finalize_request(struct crypto_engine *engine, - struct ablkcipher_request *req, int err); - /** * crypto_pump_requests - dequeue one request from engine queue to process * @engine: the hardware engine @@ -35,10 +33,11 @@ static void crypto_pump_requests(struct crypto_engine *engine, bool in_kthread) { struct crypto_async_request *async_req, *backlog; - struct ablkcipher_request *req; + struct ahash_request *hreq; + struct ablkcipher_request *breq; unsigned long flags; bool was_busy = false; - int ret; + int ret, rtype; spin_lock_irqsave(&engine->queue_lock, flags); @@ -83,9 +82,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, if (!async_req) goto out; - req = ablkcipher_request_cast(async_req); - - engine->cur_req = req; + engine->cur_req = async_req; if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -96,6 +93,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, spin_unlock_irqrestore(&engine->queue_lock, flags); + rtype = crypto_tfm_alg_type(engine->cur_req->tfm); /* Until here we get the request need to be encrypted successfully */ if (!was_busy && engine->prepare_crypt_hardware) { ret = engine->prepare_crypt_hardware(engine); @@ -105,24 +103,55 @@ static void crypto_pump_requests(struct crypto_engine *engine, } } - if (engine->prepare_request) { - ret = engine->prepare_request(engine, engine->cur_req); + switch (rtype) { + case CRYPTO_ALG_TYPE_AHASH: + hreq = ahash_request_cast(engine->cur_req); + if (engine->prepare_hash_request) { + ret = engine->prepare_hash_request(engine, hreq); + if (ret) { + pr_err("failed to prepare request: %d\n", ret); + goto req_err; + } + engine->cur_req_prepared = true; + } + ret = engine->hash_one_request(engine, hreq); if (ret) { - pr_err("failed to prepare request: %d\n", ret); + pr_err("failed to hash one request from queue\n"); goto req_err; } - engine->cur_req_prepared = true; - } - - ret = engine->crypt_one_request(engine, engine->cur_req); - if (ret) { - pr_err("failed to crypt one request from queue\n"); - goto req_err; + return; + case CRYPTO_ALG_TYPE_ABLKCIPHER: + breq = ablkcipher_request_cast(engine->cur_req); + if (engine->prepare_cipher_request) { + ret = engine->prepare_cipher_request(engine, breq); + if (ret) { + pr_err("failed to prepare request: %d\n", ret); + goto req_err; + } + engine->cur_req_prepared = true; + } + ret = engine->cipher_one_request(engine, breq); + if (ret) { + pr_err("failed to cipher one request from queue\n"); + goto req_err; + } + return; + default: + pr_err("failed to prepare request of unknown type\n"); + return; } - return; req_err: - crypto_finalize_request(engine, engine->cur_req, ret); + switch (rtype) { + case CRYPTO_ALG_TYPE_AHASH: + hreq = ahash_request_cast(engine->cur_req); + crypto_finalize_hash_request(engine, hreq, ret); + break; + case CRYPTO_ALG_TYPE_ABLKCIPHER: + breq = ablkcipher_request_cast(engine->cur_req); + crypto_finalize_cipher_request(engine, breq, ret); + 
break; + } return; out: @@ -138,12 +167,14 @@ static void crypto_pump_work(struct kthread_work *work) } /** - * crypto_transfer_request - transfer the new request into the engine queue + * crypto_transfer_cipher_request - transfer the new request into the + * enginequeue * @engine: the hardware engine * @req: the request need to be listed into the engine queue */ -int crypto_transfer_request(struct crypto_engine *engine, - struct ablkcipher_request *req, bool need_pump) +int crypto_transfer_cipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, + bool need_pump) { unsigned long flags; int ret; @@ -163,46 +194,125 @@ int crypto_transfer_request(struct crypto_engine *engine, spin_unlock_irqrestore(&engine->queue_lock, flags); return ret; } -EXPORT_SYMBOL_GPL(crypto_transfer_request); +EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request); + +/** + * crypto_transfer_cipher_request_to_engine - transfer one request to list + * into the engine queue + * @engine: the hardware engine + * @req: the request need to be listed into the engine queue + */ +int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req) +{ + return crypto_transfer_cipher_request(engine, req, true); +} +EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine); + +/** + * crypto_transfer_hash_request - transfer the new request into the + * enginequeue + * @engine: the hardware engine + * @req: the request need to be listed into the engine queue + */ +int crypto_transfer_hash_request(struct crypto_engine *engine, + struct ahash_request *req, bool need_pump) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&engine->queue_lock, flags); + + if (!engine->running) { + spin_unlock_irqrestore(&engine->queue_lock, flags); + return -ESHUTDOWN; + } + + ret = ahash_enqueue_request(&engine->queue, req); + + if (!engine->busy && need_pump) + queue_kthread_work(&engine->kworker, &engine->pump_requests); + + spin_unlock_irqrestore(&engine->queue_lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(crypto_transfer_hash_request); /** - * crypto_transfer_request_to_engine - transfer one request to list into the - * engine queue + * crypto_transfer_hash_request_to_engine - transfer one request to list + * into the engine queue * @engine: the hardware engine * @req: the request need to be listed into the engine queue */ -int crypto_transfer_request_to_engine(struct crypto_engine *engine, - struct ablkcipher_request *req) +int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, + struct ahash_request *req) { - return crypto_transfer_request(engine, req, true); + return crypto_transfer_hash_request(engine, req, true); } -EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine); +EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine); /** - * crypto_finalize_request - finalize one request if the request is done + * crypto_finalize_cipher_request - finalize one request if the request is done * @engine: the hardware engine * @req: the request need to be finalized * @err: error number */ -void crypto_finalize_request(struct crypto_engine *engine, - struct ablkcipher_request *req, int err) +void crypto_finalize_cipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err) { unsigned long flags; bool finalize_cur_req = false; int ret; spin_lock_irqsave(&engine->queue_lock, flags); - if (engine->cur_req == req) + if (engine->cur_req == &req->base) finalize_cur_req = true; spin_unlock_irqrestore(&engine->queue_lock, flags); if 
(finalize_cur_req) { - if (engine->cur_req_prepared && engine->unprepare_request) { - ret = engine->unprepare_request(engine, req); + if (engine->cur_req_prepared && + engine->unprepare_cipher_request) { + ret = engine->unprepare_cipher_request(engine, req); if (ret) pr_err("failed to unprepare request\n"); } + spin_lock_irqsave(&engine->queue_lock, flags); + engine->cur_req = NULL; + engine->cur_req_prepared = false; + spin_unlock_irqrestore(&engine->queue_lock, flags); + } + + req->base.complete(&req->base, err); + queue_kthread_work(&engine->kworker, &engine->pump_requests); +} +EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); + +/** + * crypto_finalize_hash_request - finalize one request if the request is done + * @engine: the hardware engine + * @req: the request need to be finalized + * @err: error number + */ +void crypto_finalize_hash_request(struct crypto_engine *engine, + struct ahash_request *req, int err) +{ + unsigned long flags; + bool finalize_cur_req = false; + int ret; + + spin_lock_irqsave(&engine->queue_lock, flags); + if (engine->cur_req == &req->base) + finalize_cur_req = true; + spin_unlock_irqrestore(&engine->queue_lock, flags); + + if (finalize_cur_req) { + if (engine->cur_req_prepared && + engine->unprepare_hash_request) { + ret = engine->unprepare_hash_request(engine, req); + if (ret) + pr_err("failed to unprepare request\n"); + } spin_lock_irqsave(&engine->queue_lock, flags); engine->cur_req = NULL; engine->cur_req_prepared = false; @@ -213,7 +323,7 @@ void crypto_finalize_request(struct crypto_engine *engine, queue_kthread_work(&engine->kworker, &engine->pump_requests); } -EXPORT_SYMBOL_GPL(crypto_finalize_request); +EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); /** * crypto_engine_start - start the hardware engine @@ -250,7 +360,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_start); int crypto_engine_stop(struct crypto_engine *engine) { unsigned long flags; - unsigned limit = 500; + unsigned int limit = 500; int ret = 0; spin_lock_irqsave(&engine->queue_lock, flags); diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 993e08ecd16f..3483ab66b1ca 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -520,7 +520,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) pr_debug("err: %d\n", err); - crypto_finalize_request(dd->engine, req, err); + crypto_finalize_cipher_request(dd->engine, req, err); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) @@ -593,7 +593,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, struct ablkcipher_request *req) { if (req) - return crypto_transfer_request_to_engine(dd->engine, req); + return crypto_transfer_cipher_request_to_engine(dd->engine, req); return 0; } @@ -1209,8 +1209,8 @@ static int omap_aes_probe(struct platform_device *pdev) if (!dd->engine) goto err_algs; - dd->engine->prepare_request = omap_aes_prepare_req; - dd->engine->crypt_one_request = omap_aes_crypt_req; + dd->engine->prepare_cipher_request = omap_aes_prepare_req; + dd->engine->cipher_one_request = omap_aes_crypt_req; err = crypto_engine_start(dd->engine); if (err) goto err_engine; diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index dc36e1c96eba..c0a28b1c66e4 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -507,7 +507,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) pr_debug("err: %d\n", err); pm_runtime_put(dd->dev); - crypto_finalize_request(dd->engine, req, err); + crypto_finalize_cipher_request(dd->engine, req, err); 
} static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) @@ -575,7 +575,7 @@ static int omap_des_handle_queue(struct omap_des_dev *dd, struct ablkcipher_request *req) { if (req) - return crypto_transfer_request_to_engine(dd->engine, req); + return crypto_transfer_cipher_request_to_engine(dd->engine, req); return 0; } @@ -1099,8 +1099,8 @@ static int omap_des_probe(struct platform_device *pdev) if (!dd->engine) goto err_algs; - dd->engine->prepare_request = omap_des_prepare_req; - dd->engine->crypt_one_request = omap_des_crypt_req; + dd->engine->prepare_cipher_request = omap_des_prepare_req; + dd->engine->cipher_one_request = omap_des_crypt_req; err = crypto_engine_start(dd->engine); if (err) goto err_engine; diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 40899bd246ec..04eb5c77addd 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -17,6 +17,7 @@ #include #include #include +#include #define ENGINE_NAME_LEN 30 /* @@ -36,9 +37,12 @@ * @unprepare_crypt_hardware: there are currently no more requests on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call - * @prepare_request: do some prepare if need before handle the current request - * @unprepare_request: undo any work done by prepare_message() - * @crypt_one_request: do encryption for current request + * @prepare_cipher_request: do some prepare if need before handle the current request + * @unprepare_cipher_request: undo any work done by prepare_cipher_request() + * @cipher_one_request: do encryption for current request + * @prepare_hash_request: do some prepare if need before handle the current request + * @unprepare_hash_request: undo any work done by prepare_hash_request() + * @hash_one_request: do hash for current request * @kworker: thread struct for request pump * @kworker_task: pointer to task for request pump kworker thread * @pump_requests: work struct for scheduling work to the request pump @@ -61,27 +65,40 @@ struct crypto_engine { int (*prepare_crypt_hardware)(struct crypto_engine *engine); int (*unprepare_crypt_hardware)(struct crypto_engine *engine); - int (*prepare_request)(struct crypto_engine *engine, - struct ablkcipher_request *req); - int (*unprepare_request)(struct crypto_engine *engine, - struct ablkcipher_request *req); - int (*crypt_one_request)(struct crypto_engine *engine, - struct ablkcipher_request *req); + int (*prepare_cipher_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*unprepare_cipher_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*prepare_hash_request)(struct crypto_engine *engine, + struct ahash_request *req); + int (*unprepare_hash_request)(struct crypto_engine *engine, + struct ahash_request *req); + int (*cipher_one_request)(struct crypto_engine *engine, + struct ablkcipher_request *req); + int (*hash_one_request)(struct crypto_engine *engine, + struct ahash_request *req); struct kthread_worker kworker; struct task_struct *kworker_task; struct kthread_work pump_requests; void *priv_data; - struct ablkcipher_request *cur_req; + struct crypto_async_request *cur_req; }; -int crypto_transfer_request(struct crypto_engine *engine, - struct ablkcipher_request *req, bool need_pump); -int crypto_transfer_request_to_engine(struct crypto_engine *engine, - struct ablkcipher_request *req); -void crypto_finalize_request(struct crypto_engine *engine, - struct ablkcipher_request *req, int err); +int crypto_transfer_cipher_request(struct crypto_engine 
*engine, + struct ablkcipher_request *req, + bool need_pump); +int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine, + struct ablkcipher_request *req); +int crypto_transfer_hash_request(struct crypto_engine *engine, + struct ahash_request *req, bool need_pump); +int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, + struct ahash_request *req); +void crypto_finalize_cipher_request(struct crypto_engine *engine, + struct ablkcipher_request *req, int err); +void crypto_finalize_hash_request(struct crypto_engine *engine, + struct ahash_request *req, int err); int crypto_engine_start(struct crypto_engine *engine); int crypto_engine_stop(struct crypto_engine *engine); struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); -- cgit v1.2.3-58-ga151 From 5ba1c7b5ffc16ecf245f41805e46f7351c115e57 Mon Sep 17 00:00:00 2001 From: Catalin Vasile Date: Wed, 31 Aug 2016 15:57:55 +0300 Subject: crypto: caam - fix rfc3686(ctr(aes)) IV load MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -nonce is being loaded using append_load_imm_u32() instead of append_load_as_imm() (nonce is a byte array / stream, not a 4-byte variable) -counter is not being added in big endian format, as mandated by RFC3686 and expected by the crypto engine Signed-off-by: Catalin Vasile Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 82 +++++++++++++++++++-------------------- drivers/crypto/caam/desc_constr.h | 17 ++++++++ 2 files changed, 57 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 6dc597126b79..f1116e7f7cd5 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -227,8 +227,9 @@ static void append_key_aead(u32 *desc, struct caam_ctx *ctx, if (is_rfc3686) { nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len + enckeylen); - append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | @@ -500,11 +501,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) - append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | - LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << - LDST_OFFSET_SHIFT)); + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); /* Class 1 operation */ append_operation(desc, ctx->class1_alg_type | @@ -567,11 +567,10 @@ skip_enc: /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) - append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | - LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << - LDST_OFFSET_SHIFT)); + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); /* Choose operation */ if (ctr_mode) @@ -672,11 +671,10 @@ copy_iv: /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) - append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | - LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << - LDST_OFFSET_SHIFT)); + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | +
LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); /* Class 1 operation */ append_operation(desc, ctx->class1_alg_type | @@ -1467,7 +1465,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, int ret = 0; u32 *key_jump_cmd; u32 *desc; - u32 *nonce; + u8 *nonce; u32 geniv; u32 ctx1_iv_off = 0; const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == @@ -1520,9 +1518,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { - nonce = (u32 *)(key + keylen); - append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + nonce = (u8 *)key + keylen; + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | @@ -1538,11 +1537,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Load counter into CONTEXT1 reg */ if (is_rfc3686) - append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | - LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << - LDST_OFFSET_SHIFT)); + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); /* Load operation */ append_operation(desc, ctx->class1_alg_type | @@ -1579,9 +1577,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { - nonce = (u32 *)(key + keylen); - append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + nonce = (u8 *)key + keylen; + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | @@ -1597,11 +1596,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Load counter into CONTEXT1 reg */ if (is_rfc3686) - append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | - LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << - LDST_OFFSET_SHIFT)); + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); /* Choose operation */ if (ctr_mode) @@ -1642,9 +1640,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Load Nonce into CONTEXT1 reg */ if (is_rfc3686) { - nonce = (u32 *)(key + keylen); - append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); + nonce = (u8 *)key + keylen; + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, + LDST_CLASS_IND_CCB | + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | @@ -1674,11 +1673,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) - append_load_imm_u32(desc, (u32)1, LDST_IMM | - LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT | - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << - LDST_OFFSET_SHIFT)); + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT | + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << + LDST_OFFSET_SHIFT)); if (ctx1_iv_off) append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | diff --git a/drivers/crypto/caam/desc_constr.h 
b/drivers/crypto/caam/desc_constr.h index d3869b95e7b1..a8cd8a78ec1f 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h @@ -324,6 +324,23 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \ } APPEND_CMD_RAW_IMM(load, LOAD, u32); +/* + * ee - endianness + * size - size of immediate type in bytes + */ +#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \ +static inline void append_##cmd##_imm_##ee##size(u32 *desc, \ + u##size immediate, \ + u32 options) \ +{ \ + __##ee##size data = cpu_to_##ee##size(immediate); \ + PRINT_POS; \ + append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \ + append_data(desc, &data, sizeof(data)); \ +} + +APPEND_CMD_RAW_IMM2(load, LOAD, be, 32); + /* * Append math command. Only the last part of destination and source need to * be specified -- cgit v1.2.3-58-ga151 From ba22a1e2aa8ef7f8467f755cfe44b79784febefe Mon Sep 17 00:00:00 2001 From: Quentin Lambert Date: Fri, 2 Sep 2016 11:48:53 +0200 Subject: crypto: ccp - add missing release in ccp_dmaengine_register ccp_dmaengine_register used to return with an error code before releasing all resources. This patch adds a jump to the appropriate label ensuring that the resources are properly released before returning. This issue was found with Hector. Signed-off-by: Quentin Lambert Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dmaengine.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 94f77b0f9ae7..ded26f46c735 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -650,8 +650,11 @@ int ccp_dmaengine_register(struct ccp_device *ccp) dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, "%s-dmaengine-desc-cache", ccp->name); - if (!dma_cmd_cache_name) - return -ENOMEM; + if (!dma_cmd_cache_name) { + ret = -ENOMEM; + goto err_cache; + } + ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name, sizeof(struct ccp_dma_desc), sizeof(void *), -- cgit v1.2.3-58-ga151 From b4587456fea4e2e3021341c4932bd89674c14824 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 3 Sep 2016 01:26:40 +0200 Subject: crypto: mv_cesa - remove NO_IRQ reference Drivers should not use NO_IRQ, as we are trying to get rid of that. In this case, the call to irq_of_parse_and_map() is both wrong (as it returns '0' on failure, not NO_IRQ) and unnecessary (as platform_get_irq() does the same thing). This removes the call to irq_of_parse_and_map() and checks for the error code correctly.
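For reference, a minimal sketch of the probe pattern this patch converges on (the function and handler names below are illustrative, not taken from the driver): platform_get_irq() covers DT and non-DT probing alike and reports failure as a negative errno, so no NO_IRQ comparison is needed.

static int example_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);	/* works for DT and non-DT */
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_isr, 0,
			       dev_name(&pdev->dev), NULL);
	if (ret)
		return ret;

	return 0;
}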
Signed-off-by: Arnd Bergmann Acked-by: Boris Brezillon Signed-off-by: Herbert Xu --- drivers/crypto/mv_cesa.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index e6b658faef63..104e9ce9400a 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -1091,11 +1091,8 @@ static int mv_probe(struct platform_device *pdev) cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; - if (pdev->dev.of_node) - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - else - irq = platform_get_irq(pdev, 0); - if (irq < 0 || irq == NO_IRQ) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { ret = irq; goto err; } -- cgit v1.2.3-58-ga151 From da28caaaf878b224c3ca89c1eb7373536cc5d556 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Sun, 4 Sep 2016 23:43:08 +0530 Subject: hwrng: pasemi - Migrate to managed API Use devm_ioremap and devm_hwrng_register instead of ioremap and hwrng_register. This removes unregistering and error handling code. Changes in v2: Remove hardcoded resource size in ioremap, use resource struct obtained by calling platform_get_resource. Removing hardcoded resource size was suggested by LABBE Corentin. CC: Darren Stevens Suggested-by: LABBE Corentin Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/pasemi-rng.c | 37 +++++++------------------------------ 1 file changed, 7 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 699b7259f5d7..b4e32f7ab6af 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -95,42 +95,20 @@ static struct hwrng pasemi_rng = { .data_read = pasemi_rng_data_read, }; -static int rng_probe(struct platform_device *ofdev) +static int rng_probe(struct platform_device *pdev) { void __iomem *rng_regs; - struct device_node *rng_np = ofdev->dev.of_node; - struct resource res; - int err = 0; + struct resource *res; - err = of_address_to_resource(rng_np, 0, &res); - if (err) - return -ENODEV; - - rng_regs = ioremap(res.start, 0x100); - - if (!rng_regs) - return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rng_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rng_regs)) + return PTR_ERR(rng_regs); pasemi_rng.priv = (unsigned long)rng_regs; pr_info("Registering PA Semi RNG\n"); - - err = hwrng_register(&pasemi_rng); - - if (err) - iounmap(rng_regs); - - return err; -} - -static int rng_remove(struct platform_device *dev) -{ - void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv; - - hwrng_unregister(&pasemi_rng); - iounmap(rng_regs); - - return 0; + return devm_hwrng_register(&pdev->dev, &pasemi_rng); } static const struct of_device_id rng_match[] = { @@ -146,7 +124,6 @@ static struct platform_driver rng_driver = { .of_match_table = rng_match, }, .probe = rng_probe, - .remove = rng_remove, }; module_platform_driver(rng_driver); -- cgit v1.2.3-58-ga151 From 0bec90571cb95afee80beb98c12cd82bd9136ac6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 5 Sep 2016 17:12:57 +0800 Subject: PCI: Fix cavium quirk compile failure with PCI_ATS off The newly added quirk_cavium_sriov_rnm_link doesn't compile if PCI_ATS is off. This patch adds a check for PCI_ATS. 
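The fix illustrates a general rule: code guarded only by an architecture symbol can still fail to build when an optional subsystem it calls into is disabled. A hedged sketch of the shape (the quirk body and the PCI_ANY_ID match are illustrative, not the actual Cavium quirk):

#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
static void example_quirk(struct pci_dev *dev)
{
	/* may use helpers that only exist when CONFIG_PCI_ATS=y */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, example_quirk);
#endif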
Fixes: 21b5b8eebbae ("PCI: quirk fixup for cavium invalid sriov...") Reported-by: kbuild test robot Signed-off-by: Herbert Xu --- drivers/pci/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 5980aae41d40..7060823c637e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -834,7 +834,7 @@ static void quirk_amd_ioapic(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); #endif /* CONFIG_X86_IO_APIC */ -#ifdef CONFIG_ARM64 +#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS) static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev) { -- cgit v1.2.3-58-ga151 From 4e7813a0b2aebb210416a418595ceb91ba4e5a37 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 4 Aug 2016 13:28:36 +0300 Subject: crypto: omap-sham - avoid executing tasklet where not needed Some of the call paths of OMAP SHA driver can avoid executing the next step of the crypto queue under tasklet; instead, execute the next step directly via function call. This avoids a costly round-trip via the scheduler giving a slight performance boost. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 7fe4eef12fe2..fd50005fd942 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1005,9 +1005,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if (req->base.complete) req->base.complete(&req->base, err); - - /* handle new request */ - tasklet_schedule(&dd->done_task); } static int omap_sham_handle_queue(struct omap_sham_dev *dd, @@ -1018,6 +1015,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, unsigned long flags; int err = 0, ret = 0; +retry: spin_lock_irqsave(&dd->lock, flags); if (req) ret = ahash_enqueue_request(&dd->queue, req); @@ -1061,11 +1059,19 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, err = omap_sham_final_req(dd); } err1: - if (err != -EINPROGRESS) + dev_dbg(dd->dev, "exit, err: %d\n", err); + + if (err != -EINPROGRESS) { /* done_task will not finish it, so do it here */ omap_sham_finish_req(req, err); + req = NULL; - dev_dbg(dd->dev, "exit, err: %d\n", err); + /* + * Execute next request immediately if there is anything + * in queue. + */ + goto retry; + } return ret; } @@ -1653,6 +1659,10 @@ finish: dev_dbg(dd->dev, "update done: err: %d\n", err); /* finish curent request */ omap_sham_finish_req(dd->req, err); + + /* If we are not busy, process next req */ + if (!test_bit(FLAGS_BUSY, &dd->flags)) + omap_sham_handle_queue(dd, NULL); } static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) -- cgit v1.2.3-58-ga151 From 5a793bcadbe97df7823598b429c0af052e50df1c Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 4 Aug 2016 13:28:39 +0300 Subject: crypto: omap-sham - fix software fallback handling If we have processed any data with the hardware accelerator (digcnt > 0), we must complete the entire hash by using it. This is because the current hash value can't be imported to the software fallback algorithm. Otherwise we end up with wrong hash results. 
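Condensed to a predicate, the corrected test reads as follows (the helper name is hypothetical; the field names match the driver's request context shown in the diff below). Fallback is only legal while the accelerator holds no partial state:

static bool omap_sham_can_fallback(struct omap_sham_reqctx *ctx)
{
	/* digcnt > 0: the HW holds an intermediate digest we cannot export */
	return !ctx->digcnt && ctx->bufcnt < 240;
}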
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index fd50005fd942..f788319f7ba7 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1163,7 +1163,7 @@ static int omap_sham_final(struct ahash_request *req) * If buffersize is less than 240, we use fallback SW encoding, * as using DMA + HW in this case doesn't provide any benefit. */ - if ((ctx->digcnt + ctx->bufcnt) < 240) + if (!ctx->digcnt && ctx->bufcnt < 240) return omap_sham_final_shash(req); else if (ctx->bufcnt) return omap_sham_enqueue(req, OP_FINAL); -- cgit v1.2.3-58-ga151 From cb8d5c8346640f3622bc832fede4b514096f762e Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 4 Aug 2016 13:28:40 +0300 Subject: crypto: omap-sham - fix SW fallback HMAC handling for omap2/omap3 If software fallback is used on older hardware accelerator setup (OMAP2/ OMAP3), the first block of data must be purged from the buffer. The first block contains the pre-generated ipad value required by the HW, but the software fallback algorithm generates its own, causing wrong results. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index f788319f7ba7..cf9f617cfcd7 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1143,9 +1143,20 @@ static int omap_sham_final_shash(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + int offset = 0; + + /* + * If we are running HMAC on limited hardware support, skip + * the ipad in the beginning of the buffer if we are going for + * software fallback algorithm. + */ + if (test_bit(FLAGS_HMAC, &ctx->flags) && + !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) + offset = get_block_size(ctx); return omap_sham_shash_digest(tctx->fallback, req->base.flags, - ctx->buffer, ctx->bufcnt, req->result); + ctx->buffer + offset, + ctx->bufcnt - offset, req->result); } static int omap_sham_final(struct ahash_request *req) -- cgit v1.2.3-58-ga151 From f303b455bb95233297c179b525a851a792e469b2 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 4 Aug 2016 13:28:42 +0300 Subject: crypto: omap-aes - use runtime_pm autosuspend for clock handling Calling runtime PM API at the cra_init/exit is bad for power management purposes, as the lifetime for a CRA can be very long. Instead, use pm_runtime autosuspend approach for handling the device clocks. Clocks are enabled when they are actually required, and autosuspend disables these if they have not been used for a sufficiently long time period. By default, the timeout value is 1 second. 
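Stripped of driver specifics, the conversion reduces to the following sequence; this is a sketch of the runtime PM API usage, not the full patch:

	/* probe: opt in to autosuspend with a 1 s delay */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_enable(dev);

	/* before touching registers */
	err = pm_runtime_get_sync(dev);
	if (err < 0)
		return err;

	/* on request completion: arm the timeout instead of
	 * dropping the clocks immediately */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);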
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 43 ++++++++++++++++--------------------------- 1 file changed, 16 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 3483ab66b1ca..f2aae4a3cbcc 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -86,6 +86,8 @@ #define AES_REG_IRQ_DATA_OUT BIT(2) #define DEFAULT_TIMEOUT (5*HZ) +#define DEFAULT_AUTOSUSPEND_DELAY 1000 + #define FLAGS_MODE_MASK 0x000f #define FLAGS_ENCRYPT BIT(0) #define FLAGS_CBC BIT(1) @@ -239,11 +241,19 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset, static int omap_aes_hw_init(struct omap_aes_dev *dd) { + int err; + if (!(dd->flags & FLAGS_INIT)) { dd->flags |= FLAGS_INIT; dd->err = 0; } + err = pm_runtime_get_sync(dd->dev); + if (err < 0) { + dev_err(dd->dev, "failed to get sync: %d\n", err); + return err; + } + return 0; } @@ -521,6 +531,9 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) pr_debug("err: %d\n", err); crypto_finalize_cipher_request(dd->engine, req, err); + + pm_runtime_mark_last_busy(dd->dev); + pm_runtime_put_autosuspend(dd->dev); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) @@ -762,23 +775,6 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req) static int omap_aes_cra_init(struct crypto_tfm *tfm) { - struct omap_aes_dev *dd = NULL; - int err; - - /* Find AES device, currently picks the first device */ - spin_lock_bh(&list_lock); - list_for_each_entry(dd, &dev_list, list) { - break; - } - spin_unlock_bh(&list_lock); - - err = pm_runtime_get_sync(dd->dev); - if (err < 0) { - dev_err(dd->dev, "%s: failed to get_sync(%d)\n", - __func__, err); - return err; - } - tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); return 0; @@ -786,16 +782,6 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm) static void omap_aes_cra_exit(struct crypto_tfm *tfm) { - struct omap_aes_dev *dd = NULL; - - /* Find AES device, currently picks the first device */ - spin_lock_bh(&list_lock); - list_for_each_entry(dd, &dev_list, list) { - break; - } - spin_unlock_bh(&list_lock); - - pm_runtime_put_sync(dd->dev); } /* ********************** ALGS ************************************ */ @@ -1141,6 +1127,9 @@ static int omap_aes_probe(struct platform_device *pdev) } dd->phys_base = res.start; + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); + pm_runtime_enable(dev); err = pm_runtime_get_sync(dev); if (err < 0) { -- cgit v1.2.3-58-ga151 From 164f3ef301bff9741a00ff7015f9d51f1b864201 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Thu, 4 Aug 2016 13:28:43 +0300 Subject: crypto: omap-aes - Add support for multiple cores Some SoCs like omap4/omap5/dra7 contain multiple AES crypto accelerator cores. Adapt the driver to support this. The driver picks the last used device from a list of AES devices. 
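The selection scheme is a least-recently-used rotation over the registered cores; a minimal sketch of the idea, using the spinlock-protected dev_list from the driver (comments added to spell out the why):

	spin_lock_bh(&list_lock);
	/* the list head is the least recently chosen core ... */
	dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
	/* ... and rotating it to the tail spreads requests across cores */
	list_move_tail(&dd->list, &dev_list);
	/* cache the choice so prepare/crypt for this tfm stay on one core */
	ctx->dd = dd;
	spin_unlock_bh(&list_lock);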
Signed-off-by: Lokesh Vutla [t-kristo@ti.com: forward ported to 4.7 kernel] Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index f2aae4a3cbcc..7615026709fe 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -330,20 +330,12 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd) static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) { - struct omap_aes_dev *dd = NULL, *tmp; + struct omap_aes_dev *dd; spin_lock_bh(&list_lock); - if (!ctx->dd) { - list_for_each_entry(tmp, &dev_list, list) { - /* FIXME: take fist available aes core */ - dd = tmp; - break; - } - ctx->dd = dd; - } else { - /* already found before */ - dd = ctx->dd; - } + dd = list_first_entry(&dev_list, struct omap_aes_dev, list); + list_move_tail(&dd->list, &dev_list); + ctx->dd = dd; spin_unlock_bh(&list_lock); return dd; @@ -616,7 +608,7 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); - struct omap_aes_dev *dd = omap_aes_find_dev(ctx); + struct omap_aes_dev *dd = ctx->dd; struct omap_aes_reqctx *rctx; if (!dd) @@ -662,7 +654,7 @@ static int omap_aes_crypt_req(struct crypto_engine *engine, { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); - struct omap_aes_dev *dd = omap_aes_find_dev(ctx); + struct omap_aes_dev *dd = ctx->dd; if (!dd) return -ENODEV; -- cgit v1.2.3-58-ga151 From 9fcb191a84eb5c46004b66cc449b45b4d9a2da91 Mon Sep 17 00:00:00 2001 From: Lokesh Vutla Date: Thu, 4 Aug 2016 13:28:44 +0300 Subject: crypto: omap-aes - Add fallback support As setting up the DMA operations is quite costly, add software fallback support for requests smaller than 200 bytes. This change gives some 10% extra performance in the ipsec use case. Signed-off-by: Lokesh Vutla [t-kristo@ti.com: updated against latest upstream, to use skcipher mainly] Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 3 +++ drivers/crypto/omap-aes.c | 53 +++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 52 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1af94e2d1a25..19ee6ee60f39 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -318,6 +318,9 @@ config CRYPTO_DEV_OMAP_AES select CRYPTO_AES select CRYPTO_BLKCIPHER select CRYPTO_ENGINE + select CRYPTO_CBC + select CRYPTO_ECB + select CRYPTO_CTR help OMAP processors have AES module accelerator. Select this if you want to use the OMAP module for AES algorithms.
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 7615026709fe..975288dc5139 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -35,8 +35,8 @@ #include #include #include -#include #include +#include #define DST_MAXBURST 4 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) @@ -106,6 +106,7 @@ struct omap_aes_ctx { int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; unsigned long flags; + struct crypto_skcipher *fallback; }; struct omap_aes_reqctx { @@ -702,11 +703,29 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) crypto_ablkcipher_reqtfm(req)); struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct omap_aes_dev *dd; + int ret; pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); + if (req->nbytes < 200) { + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); + + skcipher_request_set_tfm(subreq, ctx->fallback); + skcipher_request_set_callback(subreq, req->base.flags, NULL, + NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->nbytes, req->info); + + if (mode & FLAGS_ENCRYPT) + ret = crypto_skcipher_encrypt(subreq); + else + ret = crypto_skcipher_decrypt(subreq); + + skcipher_request_zero(subreq); + return ret; + } dd = omap_aes_find_dev(ctx); if (!dd) return -ENODEV; @@ -722,6 +741,7 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + int ret; if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) @@ -732,6 +752,14 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; + crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); + if (!ret) + return 0; + return 0; } @@ -767,6 +795,17 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req) static int omap_aes_cra_init(struct crypto_tfm *tfm) { + const char *name = crypto_tfm_alg_name(tfm); + const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; + struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *blk; + + blk = crypto_alloc_skcipher(name, 0, flags); + if (IS_ERR(blk)) + return PTR_ERR(blk); + + ctx->fallback = blk; + tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); return 0; @@ -774,6 +813,12 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm) static void omap_aes_cra_exit(struct crypto_tfm *tfm) { + struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->fallback) + crypto_free_skcipher(ctx->fallback); + + ctx->fallback = NULL; } /* ********************** ALGS ************************************ */ @@ -785,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), .cra_alignmask = 0, @@ -807,7 +852,7 @@ static struct crypto_alg algs_ecb_cbc[] = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), .cra_alignmask = 0, @@ -833,7 +878,7 @@ static struct 
crypto_alg algs_ctr[] = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), .cra_alignmask = 0, -- cgit v1.2.3-58-ga151 From 0d0cda93de95aee704dfbf2d6a626193431caf43 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 4 Aug 2016 13:28:45 +0300 Subject: crypto: omap-aes - fix crypto engine initialization order The crypto engine must be initialized before registering algorithms, otherwise the test manager will crash as it attempts to execute tests for the algos while they are being registered. Fixes: 0529900a01cb ("crypto: omap-aes - Support crypto engine framework") Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 975288dc5139..2033769e8381 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1213,6 +1213,17 @@ static int omap_aes_probe(struct platform_device *pdev) list_add_tail(&dd->list, &dev_list); spin_unlock(&list_lock); + /* Initialize crypto engine */ + dd->engine = crypto_engine_alloc_init(dev, 1); + if (!dd->engine) + goto err_engine; + + dd->engine->prepare_cipher_request = omap_aes_prepare_req; + dd->engine->cipher_one_request = omap_aes_crypt_req; + err = crypto_engine_start(dd->engine); + if (err) + goto err_engine; + for (i = 0; i < dd->pdata->algs_info_size; i++) { if (!dd->pdata->algs_info[i].registered) { for (j = 0; j < dd->pdata->algs_info[i].size; j++) { @@ -1230,26 +1241,17 @@ static int omap_aes_probe(struct platform_device *pdev) } } - /* Initialize crypto engine */ - dd->engine = crypto_engine_alloc_init(dev, 1); - if (!dd->engine) - goto err_algs; - - dd->engine->prepare_cipher_request = omap_aes_prepare_req; - dd->engine->cipher_one_request = omap_aes_crypt_req; - err = crypto_engine_start(dd->engine); - if (err) - goto err_engine; - return 0; -err_engine: - crypto_engine_exit(dd->engine); err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_alg( &dd->pdata->algs_info[i].algs_list[j]); +err_engine: + if (dd->engine) + crypto_engine_exit(dd->engine); + omap_aes_dma_cleanup(dd); err_irq: tasklet_kill(&dd->done_task); -- cgit v1.2.3-58-ga151 From 1d1f98d180fff442f69813c278bb7c618687757b Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 4 Aug 2016 13:28:46 +0300 Subject: crypto: omap-des - fix crypto engine initialization order The crypto engine must be initialized before registering algorithms, otherwise the test manager will crash as it attempts to execute tests for the algos while they are being registered. 
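Both ordering fixes (this one and the omap-aes one above) reduce to the same probe skeleton, sketched here with hypothetical registration helpers: the engine must be alive before the algorithms become visible to the test manager, and the error path unwinds in reverse order.

	/* 1. bring up the engine first */
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}
	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine;

	/* 2. only now expose the algorithms */
	err = example_register_algs(dd);	/* hypothetical helper */
	if (err)
		goto err_algs;

	return 0;

err_algs:
	example_unregister_algs(dd);		/* hypothetical helper */
err_engine:
	if (dd->engine)
		crypto_engine_exit(dd->engine);
	return err;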
Fixes: f1b77aaca85a ("crypto: omap-des - Integrate with the crypto engine framework") Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-des.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index c0a28b1c66e4..2b20d960caa8 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1079,6 +1079,17 @@ static int omap_des_probe(struct platform_device *pdev) list_add_tail(&dd->list, &dev_list); spin_unlock(&list_lock); + /* Initialize des crypto engine */ + dd->engine = crypto_engine_alloc_init(dev, 1); + if (!dd->engine) + goto err_engine; + + dd->engine->prepare_cipher_request = omap_des_prepare_req; + dd->engine->cipher_one_request = omap_des_crypt_req; + err = crypto_engine_start(dd->engine); + if (err) + goto err_engine; + for (i = 0; i < dd->pdata->algs_info_size; i++) { for (j = 0; j < dd->pdata->algs_info[i].size; j++) { algp = &dd->pdata->algs_info[i].algs_list[j]; @@ -1094,27 +1105,18 @@ static int omap_des_probe(struct platform_device *pdev) } } - /* Initialize des crypto engine */ - dd->engine = crypto_engine_alloc_init(dev, 1); - if (!dd->engine) - goto err_algs; - - dd->engine->prepare_cipher_request = omap_des_prepare_req; - dd->engine->cipher_one_request = omap_des_crypt_req; - err = crypto_engine_start(dd->engine); - if (err) - goto err_engine; - return 0; -err_engine: - crypto_engine_exit(dd->engine); err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_alg( &dd->pdata->algs_info[i].algs_list[j]); +err_engine: + if (dd->engine) + crypto_engine_exit(dd->engine); + omap_des_dma_cleanup(dd); err_irq: tasklet_kill(&dd->done_task); -- cgit v1.2.3-58-ga151 From a826806b7c598f65a7b1322235ab049b1cbd8b16 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Tue, 6 Sep 2016 13:58:39 +0530 Subject: hwrng: pasemi - Use linux/io.h instead of asm/io.h Checkpatch.pl warns about usage of asm/io.h. Use linux/io.h instead. Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/pasemi-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index b4e32f7ab6af..545df485bcc4 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #define SDCRNG_CTL_REG 0x00 #define SDCRNG_CTL_FVLD_M 0x0000f000 -- cgit v1.2.3-58-ga151 From 378bc4ed74a6c30c25012935c7af175501dda374 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 6 Sep 2016 11:18:51 +0100 Subject: crypto: qat - fix leak on error path Fix a memory leak in an error path in uc loader. 
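The fix is an instance of a simple ordering rule, sketched generically below (structure and helper names are hypothetical): run the validations that can fail with a bare return before any allocation, so that no early exit leaks memory.

static int example_parse(struct example_handle *h)
{
	/* validate first: a failure here has nothing to unwind */
	if (!example_header_ok(h))
		return -EINVAL;

	/* allocate only once the object is known to be usable */
	h->buf = kcalloc(EXAMPLE_BUF_WORDS, sizeof(u64), GFP_KERNEL);
	if (!h->buf)
		return -ENOMEM;

	return 0;
}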
Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_uclo.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 9b961b37a282..e2454d90d949 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -967,10 +967,6 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int ae; - obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), - GFP_KERNEL); - if (!obj_handle->uword_buf) - return -ENOMEM; obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) obj_handle->obj_hdr->file_buff; @@ -982,6 +978,10 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) pr_err("QAT: UOF incompatible\n"); return -EINVAL; } + obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), + GFP_KERNEL); + if (!obj_handle->uword_buf) + return -ENOMEM; obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; if (!obj_handle->obj_hdr->file_buff || !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, -- cgit v1.2.3-58-ga151 From 58b022acac8dd435dad0b162ad92cbe8ec90a703 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Wed, 7 Sep 2016 20:18:02 +0530 Subject: hwrng: core - Allocate memory during module init In the core, rng_buffer and rng_fillbuf are allocated in hwrng_register only once and are freed during module exit. This patch moves the allocation of rng_buffer and rng_fillbuf from hwrng_register to the rng core's init. This avoids checking whether rng_buffer and rng_fillbuf were allocated on every hwrng_register call. Also, moving them to module init makes it explicit that they are freed in module exit. Change in v2: Fix memory leak when register_miscdev fails. Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/core.c | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 9203f2d130c0..482794526e8c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -449,22 +449,6 @@ int hwrng_register(struct hwrng *rng) goto out; mutex_lock(&rng_mutex); - /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ - err = -ENOMEM; - if (!rng_buffer) { - rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); - if (!rng_buffer) - goto out_unlock; - } - if (!rng_fillbuf) { - rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); - if (!rng_fillbuf) { - kfree(rng_buffer); - goto out_unlock; - } - } - /* Must not register two RNGs with the same name.
*/ err = -EEXIST; list_for_each_entry(tmp, &rng_list, list) { @@ -573,7 +557,26 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister); static int __init hwrng_modinit(void) { - return register_miscdev(); + int ret = -ENOMEM; + + /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ + rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); + if (!rng_buffer) + return -ENOMEM; + + rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); + if (!rng_fillbuf) { + kfree(rng_buffer); + return -ENOMEM; + } + + ret = register_miscdev(); + if (ret) { + kfree(rng_fillbuf); + kfree(rng_buffer); + } + + return ret; } static void __exit hwrng_modexit(void) -- cgit v1.2.3-58-ga151 From 31b2a73c9c5f1f02c896c4e434957b831dd50a7f Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Fri, 9 Sep 2016 13:28:23 +0530 Subject: hwrng: amd - Migrate to managed API Managed API eliminates error handling code, thus reduces several lines of code. Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 48 +++++++++------------------------------- 1 file changed, 11 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 9959c762da2f..4dbc5aa23339 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -55,7 +55,6 @@ MODULE_DEVICE_TABLE(pci, pci_tbl); struct amd768_priv { void __iomem *iobase; struct pci_dev *pcidev; - u32 pmbase; }; static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) @@ -149,58 +148,33 @@ found: if (pmbase == 0) return -EIO; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (IS_ERR(priv)) + return PTR_ERR(priv); - if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { + if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, + PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", pmbase + 0xF0); - err = -EBUSY; - goto out; + return -EBUSY; } - priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE); - if (!priv->iobase) { + priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, + PMBASE_SIZE); + if (IS_ERR(priv->iobase)) { pr_err(DRV_NAME "Cannot map ioport\n"); - err = -EINVAL; - goto err_iomap; + return PTR_ERR(priv->iobase); } amd_rng.priv = (unsigned long)priv; - priv->pmbase = pmbase; priv->pcidev = pdev; pr_info(DRV_NAME " detected\n"); - err = hwrng_register(&amd_rng); - if (err) { - pr_err(DRV_NAME " registering failed (%d)\n", err); - goto err_hwrng; - } - return 0; - -err_hwrng: - ioport_unmap(priv->iobase); -err_iomap: - release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); -out: - kfree(priv); - return err; + return devm_hwrng_register(&pdev->dev, &amd_rng); } static void __exit mod_exit(void) { - struct amd768_priv *priv; - - priv = (struct amd768_priv *)amd_rng.priv; - - hwrng_unregister(&amd_rng); - - ioport_unmap(priv->iobase); - - release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); - - kfree(priv); } module_init(mod_init); -- cgit v1.2.3-58-ga151 From 94879fa82275f161f3b4fdabab00cd4d625781f3 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 10 Sep 2016 12:03:42 +0000 Subject: hwrng: st - Fix missing clk_disable_unprepare() on error in st_rng_probe() Fix the missing clk_disable_unprepare() before return from st_rng_probe() in the error handling case. 
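The one-line fix restores the usual acquire/release mirror in probe; a sketch of the corrected flow, assuming (as in the driver) that the clock was obtained and enabled earlier in the function:

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = hwrng_register(&ddata->ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register HW RNG\n");
		clk_disable_unprepare(clk);	/* undo the enable above */
		return ret;
	}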
Signed-off-by: Wei Yongjun Acked-by: Patrice Chotard Acked-by: Peter Griffin Signed-off-by: Herbert Xu --- drivers/char/hw_random/st-rng.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c index 7e8aa6b7b452..938ec10e733d 100644 --- a/drivers/char/hw_random/st-rng.c +++ b/drivers/char/hw_random/st-rng.c @@ -108,6 +108,7 @@ static int st_rng_probe(struct platform_device *pdev) ret = hwrng_register(&ddata->ops); if (ret) { dev_err(&pdev->dev, "Failed to register HW RNG\n"); + clk_disable_unprepare(clk); return ret; } -- cgit v1.2.3-58-ga151 From 6e9b5e76882cbdd99caaea50ef3917a31edead75 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Sun, 11 Sep 2016 20:53:21 +0530 Subject: hwrng: geode - Migrate to managed API Use devm_ioremap and devm_hwrng_register instead of ioremap and hwrng_register. This removes error handling code. Also moved code around by removing goto statements. This improves code readability. Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/geode-rng.c | 50 ++++++++++++-------------------------- 1 file changed, 15 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 0d0579fe465e..79e7482dc5c4 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -31,9 +31,6 @@ #include #include - -#define PFX KBUILD_MODNAME ": " - #define GEODE_RNG_DATA_REG 0x50 #define GEODE_RNG_STATUS_REG 0x54 @@ -85,7 +82,6 @@ static struct hwrng geode_rng = { static int __init mod_init(void) { - int err = -ENODEV; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; void __iomem *mem; @@ -93,43 +89,27 @@ static int __init mod_init(void) for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); - if (ent) - goto found; - } - /* Device not found. */ - goto out; - -found: - rng_base = pci_resource_start(pdev, 0); - if (rng_base == 0) - goto out; - err = -ENOMEM; - mem = ioremap(rng_base, 0x58); - if (!mem) - goto out; - geode_rng.priv = (unsigned long)mem; - - pr_info("AMD Geode RNG detected\n"); - err = hwrng_register(&geode_rng); - if (err) { - pr_err(PFX "RNG registering failed (%d)\n", - err); - goto err_unmap; + if (ent) { + rng_base = pci_resource_start(pdev, 0); + if (rng_base == 0) + return -ENODEV; + + mem = devm_ioremap(&pdev->dev, rng_base, 0x58); + if (IS_ERR(mem)) + return PTR_ERR(mem); + geode_rng.priv = (unsigned long)mem; + + pr_info("AMD Geode RNG detected\n"); + return devm_hwrng_register(&pdev->dev, &geode_rng); + } } -out: - return err; -err_unmap: - iounmap(mem); - goto out; + /* Device not found. */ + return -ENODEV; } static void __exit mod_exit(void) { - void __iomem *mem = (void __iomem *)geode_rng.priv; - - hwrng_unregister(&geode_rng); - iounmap(mem); } module_init(mod_init); -- cgit v1.2.3-58-ga151 From e9afc746299d39f415fdb13b1213137deb4fc497 Mon Sep 17 00:00:00 2001 From: PrasannaKumar Muralidharan Date: Sun, 11 Sep 2016 20:54:26 +0530 Subject: hwrng: geode - Use linux/io.h instead of asm/io.h Fix checkpatch.pl warning by changing from asm/io.h to linux/io.h. In the mean time arrange the includes in alphabetical order. 
Signed-off-by: PrasannaKumar Muralidharan Signed-off-by: Herbert Xu --- drivers/char/hw_random/geode-rng.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 79e7482dc5c4..0cae21086c05 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -24,12 +24,12 @@ * warranty of any kind, whether express or implied. */ -#include +#include +#include +#include #include +#include #include -#include -#include -#include #define GEODE_RNG_DATA_REG 0x50 #define GEODE_RNG_STATUS_REG 0x54 -- cgit v1.2.3-58-ga151 From 36e09e1f874baf581604a76ce81aae0538eb0a23 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 13 Sep 2016 04:27:54 +0900 Subject: crypto: squash lines for simple wrapper functions Remove unneeded variables and assignments. Signed-off-by: Masahiro Yamada Signed-off-by: Herbert Xu --- crypto/crct10dif_generic.c | 5 +---- crypto/mcryptd.c | 7 +------ drivers/crypto/hifn_795x.c | 12 ++---------- 3 files changed, 4 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c index c1229614c7e3..8e94e29dc6fc 100644 --- a/crypto/crct10dif_generic.c +++ b/crypto/crct10dif_generic.c @@ -107,10 +107,7 @@ static struct shash_alg alg = { static int __init crct10dif_mod_init(void) { - int ret; - - ret = crypto_register_shash(&alg); - return ret; + return crypto_register_shash(&alg); } static void __exit crct10dif_mod_fini(void) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 86fb59b109a9..94ee44acd465 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -612,12 +612,7 @@ EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); int ahash_mcryptd_digest(struct ahash_request *desc) { - int err; - - err = crypto_ahash_init(desc) ?: - ahash_mcryptd_finup(desc); - - return err; + return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc); } int ahash_mcryptd_update(struct ahash_request *desc) diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index eee2c7e6c299..e09d4055b19e 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -636,20 +636,12 @@ struct hifn_request_context { static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg) { - u32 ret; - - ret = readl(dev->bar[0] + reg); - - return ret; + return readl(dev->bar[0] + reg); } static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg) { - u32 ret; - - ret = readl(dev->bar[1] + reg); - - return ret; + return readl(dev->bar[1] + reg); } static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val) -- cgit v1.2.3-58-ga151 From 59af156625ab1d11436b04806ee3c1c795b29a93 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 15 Sep 2016 03:27:15 +0000 Subject: crypto: omap-des - fix error return code in omap_des_probe() Fix to return error code -ENOMEM from the crypto_engine_alloc_init() error handling case instead of 0, as done elsewhere in this function. 
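This fix and its omap-aes twin below share one idiom: crypto_engine_alloc_init() reports failure by returning NULL rather than an ERR_PTR, so the caller has to pick the errno itself before jumping to the unwind label. A minimal sketch:

	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;	/* without this, "err" still holds 0 */
		goto err_engine;
	}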
Fixes: f1b77aaca85a ("crypto: omap-des - Integrate with the crypto engine framework") Signed-off-by: Wei Yongjun Signed-off-by: Herbert Xu --- drivers/crypto/omap-des.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 2b20d960caa8..a6f65532fd16 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1081,8 +1081,10 @@ static int omap_des_probe(struct platform_device *pdev) /* Initialize des crypto engine */ dd->engine = crypto_engine_alloc_init(dev, 1); - if (!dd->engine) + if (!dd->engine) { + err = -ENOMEM; goto err_engine; + } dd->engine->prepare_cipher_request = omap_des_prepare_req; dd->engine->cipher_one_request = omap_des_crypt_req; -- cgit v1.2.3-58-ga151 From c98ef8dbca18db133bb4bc7643b975ee2612be2f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 15 Sep 2016 03:27:32 +0000 Subject: crypto: omap-aes - fix error return code in omap_aes_probe() Fix to return error code -ENOMEM from the crypto_engine_alloc_init() error handling case instead of 0, as done elsewhere in this function. Fixes: 0529900a01cb ("crypto: omap-aes - Support crypto engine framework") Signed-off-by: Wei Yongjun Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 2033769e8381..fe32dd95ae4f 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1215,8 +1215,10 @@ static int omap_aes_probe(struct platform_device *pdev) /* Initialize crypto engine */ dd->engine = crypto_engine_alloc_init(dev, 1); - if (!dd->engine) + if (!dd->engine) { + err = -ENOMEM; goto err_engine; + } dd->engine->prepare_cipher_request = omap_aes_prepare_req; dd->engine->cipher_one_request = omap_aes_crypt_req; -- cgit v1.2.3-58-ga151 From 664f570a9cee51a8c7caef042118abd2b48705b1 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 15 Sep 2016 03:28:04 +0000 Subject: crypto: ccp - use kmem_cache_zalloc instead of kmem_cache_alloc/memset Using kmem_cache_zalloc() instead of kmem_cache_alloc() and memset(). Signed-off-by: Wei Yongjun Acked-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dmaengine.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index ded26f46c735..2e5a05ce0e3e 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -299,12 +299,10 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan, { struct ccp_dma_desc *desc; - desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT); + desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT); if (!desc) return NULL; - memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan); desc->tx_desc.flags = flags; desc->tx_desc.tx_submit = ccp_tx_submit; -- cgit v1.2.3-58-ga151 From e7a33c4d0a97e80fe059f711621ad7a65b30a64d Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 15 Sep 2016 11:20:09 +0200 Subject: crypto: caam - Use kmalloc_array() in ahash_setkey() * A multiplication for the size determination of a memory allocation indicated that an array data structure should be processed. Thus use the corresponding function "kmalloc_array". This issue was detected by using the Coccinelle software. 
* Replace the specification of a data type by a pointer dereference to make the corresponding size determination a bit safer according to the Linux coding style convention. Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 9d7fc9ec0b7e..f19df8f30f2d 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -525,8 +525,9 @@ static int ahash_setkey(struct crypto_ahash *ahash, #endif if (keylen > blocksize) { - hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL | - GFP_DMA); + hashed_key = kmalloc_array(digestsize, + sizeof(*hashed_key), + GFP_KERNEL | GFP_DMA); if (!hashed_key) return -ENOMEM; ret = hash_digest_key(ctx, key, &keylen, hashed_key, -- cgit v1.2.3-58-ga151 From d6e7a7d0c2c5170234f0afb94b2bcdaf93630a72 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 15 Sep 2016 13:54:49 +0200 Subject: crypto: caam - Rename jump labels in ahash_setkey() Adjust jump labels according to the current Linux coding style convention. Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index f19df8f30f2d..60174706205e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -533,7 +533,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, ret = hash_digest_key(ctx, key, &keylen, hashed_key, digestsize); if (ret) - goto badkey; + goto bad_free_key; key = hashed_key; } @@ -551,14 +551,14 @@ static int ahash_setkey(struct crypto_ahash *ahash, ret = gen_split_hash_key(ctx, key, keylen); if (ret) - goto badkey; + goto bad_free_key; ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, ctx->key_dma)) { dev_err(jrdev, "unable to map key i/o memory\n"); ret = -ENOMEM; - goto map_err; + goto error_free_key; } #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", @@ -571,11 +571,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, DMA_TO_DEVICE); } - -map_err: + error_free_key: kfree(hashed_key); return ret; -badkey: + bad_free_key: kfree(hashed_key); crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; -- cgit v1.2.3-58-ga151 From 58b0e5d0abfc84094457c93d70c0f6726deb16ac Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 15 Sep 2016 14:43:38 +0200 Subject: crypto: caam - Rename a jump label in five functions Adjust jump labels according to the current Linux coding style convention. 
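The convention being applied, shown in miniature (function and helper names are hypothetical): a label is named after the cleanup it performs and indented by a single space, instead of a generic err or out name.

static int example_op(struct example_ctx *c)
{
	int ret;

	ret = example_map(c);
	if (ret)
		return ret;

	ret = example_enqueue(c);
	if (ret)
		goto unmap_ctx;	/* the name says what happens there */

	return -EINPROGRESS;

 unmap_ctx:
	example_unmap(c);
	return ret;
}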
Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 49 +++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 60174706205e..933252ffe76d 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -889,7 +889,7 @@ static int ahash_update_ctx(struct ahash_request *req) ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) - goto err; + goto unmap_ctx; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, @@ -919,7 +919,7 @@ static int ahash_update_ctx(struct ahash_request *req) if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); ret = -ENOMEM; - goto err; + goto unmap_ctx; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + @@ -935,7 +935,7 @@ static int ahash_update_ctx(struct ahash_request *req) ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); if (ret) - goto err; + goto unmap_ctx; ret = -EINPROGRESS; } else if (*next_buflen) { @@ -953,8 +953,7 @@ static int ahash_update_ctx(struct ahash_request *req) #endif return ret; - - err: + unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); kfree(edesc); return ret; @@ -996,7 +995,7 @@ static int ahash_final_ctx(struct ahash_request *req) ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); if (ret) - goto err; + goto unmap_ctx; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, @@ -1009,7 +1008,7 @@ static int ahash_final_ctx(struct ahash_request *req) if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); ret = -ENOMEM; - goto err; + goto unmap_ctx; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, @@ -1020,7 +1019,7 @@ static int ahash_final_ctx(struct ahash_request *req) if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); ret = -ENOMEM; - goto err; + goto unmap_ctx; } #ifdef DEBUG @@ -1030,11 +1029,10 @@ static int ahash_final_ctx(struct ahash_request *req) ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); if (ret) - goto err; + goto unmap_ctx; return -EINPROGRESS; - -err: + unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); return ret; @@ -1094,7 +1092,7 @@ static int ahash_finup_ctx(struct ahash_request *req) ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_TO_DEVICE); if (ret) - goto err; + goto unmap_ctx; state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, @@ -1104,14 +1102,14 @@ static int ahash_finup_ctx(struct ahash_request *req) sec4_sg_src_index, ctx->ctx_len + buflen, req->nbytes); if (ret) - goto err; + goto unmap_ctx; edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); ret = -ENOMEM; - goto err; + goto unmap_ctx; } #ifdef DEBUG @@ -1121,11 +1119,10 @@ static int ahash_finup_ctx(struct ahash_request *req) ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); if (ret) - goto err; + goto unmap_ctx; return -EINPROGRESS; - -err: + unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); return ret; @@ -1350,14 +1347,14 @@ static int 
ahash_update_no_ctx(struct ahash_request *req) if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); ret = -ENOMEM; - goto err; + goto unmap_ctx; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) - goto err; + goto unmap_ctx; #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1367,7 +1364,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); if (ret) - goto err; + goto unmap_ctx; ret = -EINPROGRESS; state->update = ahash_update_ctx; @@ -1388,8 +1385,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) #endif return ret; - -err: + unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); kfree(edesc); return ret; @@ -1548,7 +1544,7 @@ static int ahash_update_first(struct ahash_request *req) ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, to_hash); if (ret) - goto err; + goto unmap_ctx; if (*next_buflen) scatterwalk_map_and_copy(next_buf, req->src, to_hash, @@ -1558,7 +1554,7 @@ static int ahash_update_first(struct ahash_request *req) ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) - goto err; + goto unmap_ctx; #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1568,7 +1564,7 @@ static int ahash_update_first(struct ahash_request *req) ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); if (ret) - goto err; + goto unmap_ctx; ret = -EINPROGRESS; state->update = ahash_update_ctx; @@ -1588,8 +1584,7 @@ static int ahash_update_first(struct ahash_request *req) #endif return ret; - -err: + unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); kfree(edesc); return ret; -- cgit v1.2.3-58-ga151 From e6cc5b8df069cb0feff3d10da9e959cc2618625a Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 15 Sep 2016 14:56:12 +0200 Subject: crypto: caam - Return a value directly in caam_hash_cra_init() * Return a value at the end without storing it in an intermediate variable. * Delete the local variable "ret" which became unnecessary with this refactoring. Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 933252ffe76d..b1dbc53938d7 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1846,7 +1846,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) HASH_MSG_LEN + SHA256_DIGEST_SIZE, HASH_MSG_LEN + 64, HASH_MSG_LEN + SHA512_DIGEST_SIZE }; - int ret = 0; /* * Get a Job ring from Job Ring driver to ensure in-order @@ -1866,10 +1865,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct caam_hash_state)); - - ret = ahash_set_sh_desc(ahash); - - return ret; + return ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) -- cgit v1.2.3-58-ga151 From 9e6df0fdfc4b3067de03f7be02f274e0a81737e7 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 15 Sep 2016 15:24:02 +0200 Subject: crypto: caam - Delete an unnecessary initialisation in seven functions The local variable "ret" will be set to an appropriate value a bit later. Thus omit the explicit initialisation at the beginning. 
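Besides deleting a dead store, dropping the initialiser has a practical benefit: the compiler can again warn if any code path reads "ret" before something assigns it. A minimal sketch of the pattern, with hypothetical helper names:

static int example(void)
{
	int ret;			/* no redundant "= 0" initialiser */

	ret = do_work();		/* first use is an assignment */
	if (ret)
		return ret;

	return do_more_work();
}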
Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index b1dbc53938d7..adb8b19ee942 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -440,7 +440,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, u32 *desc; struct split_key_result result; dma_addr_t src_dma, dst_dma; - int ret = 0; + int ret; desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); if (!desc) { @@ -517,7 +517,7 @@ static int ahash_setkey(struct crypto_ahash *ahash, struct device *jrdev = ctx->jrdev; int blocksize = crypto_tfm_alg_blocksize(&ahash->base); int digestsize = crypto_ahash_digestsize(ahash); - int ret = 0; + int ret; u8 *hashed_key = NULL; #ifdef DEBUG @@ -975,7 +975,7 @@ static int ahash_final_ctx(struct ahash_request *req) int sec4_sg_bytes, sec4_sg_src_index; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; - int ret = 0; + int ret; sec4_sg_src_index = 1 + (buflen ? 1 : 0); sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); @@ -1055,7 +1055,7 @@ static int ahash_finup_ctx(struct ahash_request *req) int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; - int ret = 0; + int ret; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -1139,7 +1139,7 @@ static int ahash_digest(struct ahash_request *req) int digestsize = crypto_ahash_digestsize(ahash); int src_nents, mapped_nents; struct ahash_edesc *edesc; - int ret = 0; + int ret; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { @@ -1218,7 +1218,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; - int ret = 0; + int ret; /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, @@ -1408,7 +1408,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; - int ret = 0; + int ret; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { -- cgit v1.2.3-58-ga151 From 06435f3437599d01653d42670c604d0b28a878e1 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Thu, 15 Sep 2016 16:00:55 +0200 Subject: crypto: caam - Move common error handling code in two functions Move statements for error handling which were identical in two if branches to the end of these functions. 
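The transformation is the standard consolidation of duplicated cleanup into one labelled exit path. A minimal sketch under the assumption of hypothetical helpers (alloc_edesc(), map_src(), map_dst(), unmap_all()); the real functions keep the same shape:

static int example(struct device *dev)
{
	struct edesc *edesc;		/* illustrative type */
	int ret;

	edesc = alloc_edesc();
	if (!edesc)
		return -ENOMEM;

	ret = map_src(dev, edesc);
	if (ret)
		goto unmap;		/* was: inline unmap + kfree + return */

	ret = map_dst(dev, edesc);
	if (ret)
		goto unmap;		/* identical cleanup, now shared */

	return submit(dev, edesc);

 unmap:
	unmap_all(dev, edesc);
	kfree(edesc);
	return -ENOMEM;
}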
Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index adb8b19ee942..660dc206969f 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1231,9 +1231,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, state->buf_dma)) { dev_err(jrdev, "unable to map src\n"); - ahash_unmap(jrdev, edesc, req, digestsize); - kfree(edesc); - return -ENOMEM; + goto unmap; } append_seq_in_ptr(desc, state->buf_dma, buflen, 0); @@ -1242,9 +1240,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); - ahash_unmap(jrdev, edesc, req, digestsize); - kfree(edesc); - return -ENOMEM; + goto unmap; } edesc->src_nents = 0; @@ -1262,6 +1258,11 @@ static int ahash_final_no_ctx(struct ahash_request *req) } return ret; + unmap: + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return -ENOMEM; + } /* submit ahash update if it the first job descriptor after update */ @@ -1453,18 +1454,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req) req->nbytes); if (ret) { dev_err(jrdev, "unable to map S/G table\n"); - ahash_unmap(jrdev, edesc, req, digestsize); - kfree(edesc); - return -ENOMEM; + goto unmap; } edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, digestsize); if (dma_mapping_error(jrdev, edesc->dst_dma)) { dev_err(jrdev, "unable to map dst\n"); - ahash_unmap(jrdev, edesc, req, digestsize); - kfree(edesc); - return -ENOMEM; + goto unmap; } #ifdef DEBUG @@ -1481,6 +1478,11 @@ static int ahash_finup_no_ctx(struct ahash_request *req) } return ret; + unmap: + ahash_unmap(jrdev, edesc, req, digestsize); + kfree(edesc); + return -ENOMEM; + } /* submit first update job descriptor after init */ -- cgit v1.2.3-58-ga151 From f7eca278a8f81e701848392b522f80078bf2589f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 16 Sep 2016 01:49:41 +0000 Subject: hwrng: amd - Fix return value check in mod_init() In case of error, the functions devm_kzalloc() and devm_ioport_map() return a NULL pointer, not ERR_PTR(). The IS_ERR() tests in the return value checks should be replaced with NULL tests.
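The underlying rule: kernel allocation and mapping helpers signal failure in one of two incompatible ways, and the check must match the helper. devm_kzalloc() and devm_ioport_map() are NULL-returning; IS_ERR()/PTR_ERR() only apply to ERR_PTR()-returning APIs. A short sketch of both conventions (the clk line is only there for contrast):

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)			/* NULL on failure, never ERR_PTR() */
		return -ENOMEM;

	clk = devm_clk_get(dev, NULL);	/* this family returns ERR_PTR() */
	if (IS_ERR(clk))
		return PTR_ERR(clk);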
Fixes: 31b2a73c9c5f ("hwrng: amd - Migrate to managed API") Signed-off-by: Wei Yongjun Signed-off-by: Herbert Xu --- drivers/char/hw_random/amd-rng.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 4dbc5aa23339..4a99ac756f08 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -149,8 +149,8 @@ found: return -EIO; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (IS_ERR(priv)) - return PTR_ERR(priv); + if (!priv) + return -ENOMEM; if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { @@ -161,9 +161,9 @@ found: priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, PMBASE_SIZE); - if (IS_ERR(priv->iobase)) { + if (!priv->iobase) { pr_err(DRV_NAME "Cannot map ioport\n"); - return PTR_ERR(priv->iobase); + return -ENOMEM; } amd_rng.priv = (unsigned long)priv; -- cgit v1.2.3-58-ga151 From 104a32e85f250742bc37e14d19a5dec706bed76e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Fri, 16 Sep 2016 01:50:01 +0000 Subject: hwrng: geode - fix return value check in mod_init() In case of error, the function devm_ioremap() returns a NULL pointer, not ERR_PTR(). The IS_ERR() test in the return value check should be replaced with a NULL test. Fixes: 6e9b5e76882c ("hwrng: geode - Migrate to managed API") Signed-off-by: Wei Yongjun Signed-off-by: Herbert Xu --- drivers/char/hw_random/geode-rng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 0cae21086c05..e7a245942029 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -95,8 +95,8 @@ static int __init mod_init(void) return -ENODEV; mem = devm_ioremap(&pdev->dev, rng_base, 0x58); - if (IS_ERR(mem)) - return PTR_ERR(mem); + if (!mem) + return -ENOMEM; geode_rng.priv = (unsigned long)mem; pr_info("AMD Geode RNG detected\n"); -- cgit v1.2.3-58-ga151 From 7514e3688811e610640ec2201ca14dfebfe13442 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 17 Sep 2016 16:01:22 +0000 Subject: crypto: ccp - Fix return value check in ccp_dmaengine_register() Fix the return value check, which was testing the wrong variable in ccp_dmaengine_register().
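The fixed defect is a classic copy-and-paste slip: the second allocation was followed by a check of the first allocation's variable, so a failed devm_kasprintf() would go unnoticed. In miniature, with illustrative names:

	cmd_name = devm_kasprintf(dev, GFP_KERNEL, "%s-cmd", base);
	if (!cmd_name)
		return -ENOMEM;

	desc_name = devm_kasprintf(dev, GFP_KERNEL, "%s-desc", base);
	if (!desc_name)			/* must test what was just assigned */
		return -ENOMEM;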
Fixes: 58ea8abf4904 ("crypto: ccp - Register the CCP as a DMA resource") Signed-off-by: Wei Yongjun Acked-by: Gary R Hook Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dmaengine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 2e5a05ce0e3e..6553912804f7 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -648,7 +648,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, "%s-dmaengine-desc-cache", ccp->name); - if (!dma_cmd_cache_name) { + if (!dma_desc_cache_name) { ret = -ENOMEM; goto err_cache; } -- cgit v1.2.3-58-ga151 From 8d46a5c872946ab5e1cc8ff54cac5b83d05b848d Mon Sep 17 00:00:00 2001 From: Baoyou Xie Date: Sun, 18 Sep 2016 20:52:44 +0800 Subject: crypto: sun4i-ss - mark sun4i_hash() static We get 1 warning when building the kernel with W=1: drivers/crypto/sunxi-ss/sun4i-ss-hash.c:168:5: warning: no previous prototype for 'sun4i_hash' [-Wmissing-prototypes] In fact, this function is only used in the file in which it is declared and doesn't need a declaration, but can be made static. So this patch marks it 'static'. Signed-off-by: Baoyou Xie Signed-off-by: Herbert Xu --- drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 1afeb8e5f709..0de2f62d51ff 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -165,7 +165,7 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) * write remaining data in op->buf * final state op->len=56 */ -int sun4i_hash(struct ahash_request *areq) +static int sun4i_hash(struct ahash_request *areq) { u32 v, ivmode = 0; unsigned int i = 0; -- cgit v1.2.3-58-ga151 From 99a7ffffd5e85bb1e079752be51b67d5257b6a87 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:12 +0300 Subject: crypto: omap-sham - add context export/import stubs Initially these just return -ENOTSUPP to indicate that they don't really do anything yet. Some sort of implementation is required for the driver to at least probe.
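For context, export/import is what lets a user of the ahash API checkpoint a partially hashed request and resume it later, which is why the crypto core insists on working callbacks and a correct statesize. Roughly, from the caller's side (a sketch only; error handling trimmed, req2 is a second request assumed to use the same tfm):

	int len = crypto_ahash_statesize(crypto_ahash_reqtfm(req));
	void *state = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);	/* save partial digest state */
	if (!ret)
		ret = crypto_ahash_import(req2, state);	/* resume on req2 */
	kfree(state);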
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index cf9f617cfcd7..74653c9f2d80 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1340,6 +1340,16 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) } } +static int omap_sham_export(struct ahash_request *req, void *out) +{ + return -ENOTSUPP; +} + +static int omap_sham_import(struct ahash_request *req, const void *in) +{ + return -ENOTSUPP; +} + static struct ahash_alg algs_sha1_md5[] = { { .init = omap_sham_init, @@ -1998,8 +2008,13 @@ static int omap_sham_probe(struct platform_device *pdev) for (i = 0; i < dd->pdata->algs_info_size; i++) { for (j = 0; j < dd->pdata->algs_info[i].size; j++) { - err = crypto_register_ahash( - &dd->pdata->algs_info[i].algs_list[j]); + struct ahash_alg *alg; + + alg = &dd->pdata->algs_info[i].algs_list[j]; + alg->export = omap_sham_export; + alg->import = omap_sham_import; + alg->halg.statesize = sizeof(struct omap_sham_reqctx); + err = crypto_register_ahash(alg); if (err) goto err_algs; -- cgit v1.2.3-58-ga151 From 744e686a446cb4667ad42c530d372fbbc963507b Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:13 +0300 Subject: crypto: omap-sham - align algorithms on word offset OMAP HW generally expects data for DMA to be on word boundary, so make the SHA driver inform crypto framework of the same preference. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 74653c9f2d80..3f2bf98db54b 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1368,7 +1368,7 @@ static struct ahash_alg algs_sha1_md5[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, @@ -1467,7 +1467,7 @@ static struct ahash_alg algs_sha224_sha256[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, @@ -1489,7 +1489,7 @@ static struct ahash_alg algs_sha224_sha256[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, @@ -1562,7 +1562,7 @@ static struct ahash_alg algs_sha384_sha512[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, @@ -1584,7 +1584,7 @@ static struct ahash_alg algs_sha384_sha512[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, -- cgit 
v1.2.3-58-ga151 From 8addf571aeb7ef096201315737543b4c5303300a Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:14 +0300 Subject: crypto: omap-sham - rename sgl to sgl_tmp for deprecation The current usage of sgl will be deprecated, and will be replaced by an array required by the sg based driver implementation. Rename the existing variable as sgl_tmp so that it can be removed from the driver easily later. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 3f2bf98db54b..33bea520f50a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -151,7 +151,7 @@ struct omap_sham_reqctx { /* walk state */ struct scatterlist *sg; - struct scatterlist sgl; + struct scatterlist sgl_tmp; unsigned int offset; /* offset in current sg */ unsigned int total; /* total request */ @@ -583,18 +583,19 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, if (is_sg) { /* * The SG entry passed in may not have the 'length' member - * set correctly so use a local SG entry (sgl) with the + * set correctly so use a local SG entry (sgl_tmp) with the * proper value for 'length' instead. If this is not done, * the dmaengine may try to DMA the incorrect amount of data. */ - sg_init_table(&ctx->sgl, 1); - sg_assign_page(&ctx->sgl, sg_page(ctx->sg)); - ctx->sgl.offset = ctx->sg->offset; - sg_dma_len(&ctx->sgl) = len32; - sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); - - tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1, - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + sg_init_table(&ctx->sgl_tmp, 1); + sg_assign_page(&ctx->sgl_tmp, sg_page(ctx->sg)); + ctx->sgl_tmp.offset = ctx->sg->offset; + sg_dma_len(&ctx->sgl_tmp) = len32; + sg_dma_address(&ctx->sgl_tmp) = sg_dma_address(ctx->sg); + + tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl_tmp, 1, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } else { tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -- cgit v1.2.3-58-ga151 From f19de1bc67a042d00175139d1540cdb6ac97ed50 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:15 +0300 Subject: crypto: omap-sham - add support functions for sg based data handling Currently omap-sham uses a huge internal buffer for caching data and pushing it out to the DMA in large chunks. This, unfortunately, doesn't work too well with the export/import functionality required for ahash algorithms, and must be changed towards a more scatterlist-centric approach. This patch adds support functions for (mostly) scatterlist based data handling. omap_sham_prepare_request() prepares a scatterlist for DMA transfer to the SHA crypto accelerator. This requires checking the data / offset / length alignment of the data, splitting the data to SHA block size granularity, and adding any remaining data back to the buffer. With this patch, the code doesn't actually go live yet, the support code will be taken properly into use with additional patches that modify the SHA driver functionality itself.
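The alignment rules the new helpers enforce can be summarised in a few lines. A simplified sketch of the kind of test omap_sham_align_sgs() applies before deciding between direct DMA and a copy; the real code additionally tracks offsets and partial lengths:

static bool sg_dma_friendly(struct scatterlist *sg, int bs)
{
	for (; sg; sg = sg_next(sg)) {
		/* each chunk must start word-aligned... */
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;
		/* ...and all but the last must be block-size multiples */
		if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, bs))
			return false;
	}
	return true;
}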
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 263 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 263 insertions(+) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 33bea520f50a..855898977d38 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -112,6 +112,8 @@ #define FLAGS_DMA_READY 6 #define FLAGS_AUTO_XOR 7 #define FLAGS_BE32_SHA1 8 +#define FLAGS_SGS_COPIED 9 +#define FLAGS_SGS_ALLOCED 10 /* context flags */ #define FLAGS_FINUP 16 #define FLAGS_SG 17 @@ -151,8 +153,10 @@ struct omap_sham_reqctx { /* walk state */ struct scatterlist *sg; + struct scatterlist sgl[2]; struct scatterlist sgl_tmp; unsigned int offset; /* offset in current sg */ + int sg_len; unsigned int total; /* total request */ u8 buffer[0] OMAP_ALIGNED; @@ -223,6 +227,7 @@ struct omap_sham_dev { struct dma_chan *dma_lch; struct tasklet_struct done_task; u8 polling_mode; + u8 xmit_buf[BUFLEN]; unsigned long flags; struct crypto_queue queue; @@ -626,6 +631,260 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, return -EINPROGRESS; } +static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, + struct scatterlist *sg, int bs, int new_len) +{ + int n = sg_nents(sg); + struct scatterlist *tmp; + int offset = ctx->offset; + + if (ctx->bufcnt) + n++; + + ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); + if (!ctx->sg) + return -ENOMEM; + + sg_init_table(ctx->sg, n); + + tmp = ctx->sg; + + ctx->sg_len = 0; + + if (ctx->bufcnt) { + sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); + tmp = sg_next(tmp); + ctx->sg_len++; + } + + while (sg && new_len) { + int len = sg->length - offset; + + if (offset) { + offset -= sg->length; + if (offset < 0) + offset = 0; + } + + if (new_len < len) + len = new_len; + + if (len > 0) { + new_len -= len; + sg_set_page(tmp, sg_page(sg), len, sg->offset); + if (new_len <= 0) + sg_mark_end(tmp); + tmp = sg_next(tmp); + ctx->sg_len++; + } + + sg = sg_next(sg); + } + + set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); + + ctx->bufcnt = 0; + + return 0; +} + +static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx, + struct scatterlist *sg, int bs, int new_len) +{ + int pages; + void *buf; + int len; + + len = new_len + ctx->bufcnt; + + pages = get_order(ctx->total); + + buf = (void *)__get_free_pages(GFP_ATOMIC, pages); + if (!buf) { + pr_err("Couldn't allocate pages for unaligned cases.\n"); + return -ENOMEM; + } + + if (ctx->bufcnt) + memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); + + scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset, + ctx->total - ctx->bufcnt, 0); + sg_init_table(ctx->sgl, 1); + sg_set_buf(ctx->sgl, buf, len); + ctx->sg = ctx->sgl; + set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); + ctx->sg_len = 1; + ctx->bufcnt = 0; + ctx->offset = 0; + + return 0; +} + +static int omap_sham_align_sgs(struct scatterlist *sg, + int nbytes, int bs, bool final, + struct omap_sham_reqctx *rctx) +{ + int n = 0; + bool aligned = true; + bool list_ok = true; + struct scatterlist *sg_tmp = sg; + int new_len; + int offset = rctx->offset; + + if (!sg || !sg->length || !nbytes) + return 0; + + new_len = nbytes; + + if (offset) + list_ok = false; + + if (final) + new_len = DIV_ROUND_UP(new_len, bs) * bs; + else + new_len = new_len / bs * bs; + + while (nbytes > 0 && sg_tmp) { + n++; + + if (offset < sg_tmp->length) { + if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { + aligned = false; + break; + } + + if (!IS_ALIGNED(sg_tmp->length - offset, bs)) 
{ + aligned = false; + break; + } + } + + if (offset) { + offset -= sg_tmp->length; + if (offset < 0) { + nbytes += offset; + offset = 0; + } + } else { + nbytes -= sg_tmp->length; + } + + sg_tmp = sg_next(sg_tmp); + + if (nbytes < 0) { + list_ok = false; + break; + } + } + + if (!aligned) + return omap_sham_copy_sgs(rctx, sg, bs, new_len); + else if (!list_ok) + return omap_sham_copy_sg_lists(rctx, sg, bs, new_len); + + rctx->sg_len = n; + rctx->sg = sg; + + return 0; +} + +static int omap_sham_prepare_request(struct ahash_request *req, bool update) +{ + struct omap_sham_reqctx *rctx = ahash_request_ctx(req); + int bs; + int ret; + int nbytes; + bool final = rctx->flags & BIT(FLAGS_FINUP); + int xmit_len, hash_later; + + if (!req) + return 0; + + bs = get_block_size(rctx); + + if (update) + nbytes = req->nbytes; + else + nbytes = 0; + + rctx->total = nbytes + rctx->bufcnt; + + if (!rctx->total) + return 0; + + if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) { + int len = bs - rctx->bufcnt % bs; + + if (len > nbytes) + len = nbytes; + scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src, + 0, len, 0); + rctx->bufcnt += len; + nbytes -= len; + rctx->offset = len; + } + + if (rctx->bufcnt) + memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt); + + ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx); + if (ret) + return ret; + + xmit_len = rctx->total; + + if (!IS_ALIGNED(xmit_len, bs)) { + if (final) + xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; + else + xmit_len = xmit_len / bs * bs; + } + + hash_later = rctx->total - xmit_len; + if (hash_later < 0) + hash_later = 0; + + if (rctx->bufcnt && nbytes) { + /* have data from previous operation and current */ + sg_init_table(rctx->sgl, 2); + sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); + + sg_chain(rctx->sgl, 2, req->src); + + rctx->sg = rctx->sgl; + + rctx->sg_len++; + } else if (rctx->bufcnt) { + /* have buffered data only */ + sg_init_table(rctx->sgl, 1); + sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len); + + rctx->sg = rctx->sgl; + + rctx->sg_len = 1; + } + + if (hash_later) { + if (req->nbytes) { + scatterwalk_map_and_copy(rctx->buffer, req->src, + req->nbytes - hash_later, + hash_later, 0); + } else { + memcpy(rctx->buffer, rctx->buffer + xmit_len, + hash_later); + } + rctx->bufcnt = hash_later; + } else { + rctx->bufcnt = 0; + } + + if (!final) + rctx->total = xmit_len; + + return 0; +} + static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, const u8 *data, size_t length) { @@ -1040,6 +1299,10 @@ retry: dd->req = req; ctx = ahash_request_ctx(req); + err = omap_sham_prepare_request(NULL, ctx->op == OP_UPDATE); + if (err) + goto err1; + dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", ctx->op, req->nbytes); -- cgit v1.2.3-58-ga151 From 2c5bd1ef1511ecf3df24a1f2ee94c7f39d3318e1 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:16 +0300 Subject: crypto: omap-sham - change the DMA threshold value to a define Currently the threshold value was hardcoded in the driver. Having a define for it makes it easier to configure. 
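With the define in place, the CPU-versus-DMA decision reads as a policy statement rather than a magic number. A sketch of the intended use; the xmit helpers here are illustrative stand-ins for the driver's transmit paths:

#define OMAP_SHA_DMA_THRESHOLD	256

	/* short requests: DMA setup costs more than hashing on the CPU */
	if (ctx->total < OMAP_SHA_DMA_THRESHOLD)
		ret = xmit_cpu(dd, ctx->total, final);
	else
		ret = xmit_dma(dd, ctx->total, final);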
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 855898977d38..5c95bf97c132 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -137,6 +137,7 @@ #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) #define BUFLEN PAGE_SIZE +#define OMAP_SHA_DMA_THRESHOLD 256 struct omap_sham_dev; @@ -1435,10 +1436,11 @@ static int omap_sham_final(struct ahash_request *req) /* * OMAP HW accel works only with buffers >= 9. * HMAC is always >= 9 because ipad == block size. - * If buffersize is less than 240, we use fallback SW encoding, - * as using DMA + HW in this case doesn't provide any benefit. + * If buffersize is less than DMA_THRESHOLD, we use fallback + * SW encoding, as using DMA + HW in this case doesn't provide + * any benefit. */ - if (!ctx->digcnt && ctx->bufcnt < 240) + if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD) return omap_sham_final_shash(req); else if (ctx->bufcnt) return omap_sham_enqueue(req, OP_FINAL); -- cgit v1.2.3-58-ga151 From 8043bb1ae03cb842cd06a33fb1233e458f6f7f37 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:17 +0300 Subject: crypto: omap-sham - convert driver logic to use sgs for data xmit Currently, the internal buffer has been used for data transmission. Change this so that scatterlists are used instead, and change the driver to actually use the previously introduced helper functions for scatterlist preparation. This patch also removes the old buffer handling code which is no longer needed. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 344 ++++++++++----------------------------------- 1 file changed, 74 insertions(+), 270 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 5c95bf97c132..412559e9b2a1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -116,7 +116,6 @@ #define FLAGS_SGS_ALLOCED 10 /* context flags */ #define FLAGS_FINUP 16 -#define FLAGS_SG 17 #define FLAGS_MODE_SHIFT 18 #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) @@ -150,13 +149,11 @@ struct omap_sham_reqctx { size_t digcnt; size_t bufcnt; size_t buflen; - dma_addr_t dma_addr; /* walk state */ struct scatterlist *sg; struct scatterlist sgl[2]; - struct scatterlist sgl_tmp; - unsigned int offset; /* offset in current sg */ + int offset; /* offset in current sg */ int sg_len; unsigned int total; /* total request */ @@ -516,12 +513,14 @@ static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) SHA_REG_IRQSTATUS_INPUT_RDY); } -static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, - size_t length, int final) +static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, + int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); int count, len32, bs32, offset = 0; - const u32 *buffer = (const u32 *)buf; + const u32 *buffer; + int mlen; + struct sg_mapping_iter mi; dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); @@ -531,6 +530,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, /* should be non-zero before next lines to disable clocks later */ ctx->digcnt += length; + ctx->total -= length; if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ @@ -540,16 +540,35 @@ static int 
omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, len32 = DIV_ROUND_UP(length, sizeof(u32)); bs32 = get_block_size(ctx) / sizeof(u32); + sg_miter_start(&mi, ctx->sg, ctx->sg_len, + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + + mlen = 0; + while (len32) { if (dd->pdata->poll_irq(dd)) return -ETIMEDOUT; - for (count = 0; count < min(len32, bs32); count++, offset++) + for (count = 0; count < min(len32, bs32); count++, offset++) { + if (!mlen) { + sg_miter_next(&mi); + mlen = mi.length; + if (!mlen) { + pr_err("sg miter failure.\n"); + return -EINVAL; + } + offset = 0; + buffer = mi.addr; + } omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[offset]); + mlen -= 4; + } len32 -= min(len32, bs32); } + sg_miter_stop(&mi); + return -EINPROGRESS; } @@ -561,22 +580,27 @@ static void omap_sham_dma_callback(void *param) tasklet_schedule(&dd->done_task); } -static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, - size_t length, int final, int is_sg) +static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, + int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); struct dma_async_tx_descriptor *tx; struct dma_slave_config cfg; - int len32, ret, dma_min = get_block_size(ctx); + int ret; dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); + if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { + dev_err(dd->dev, "dma_map_sg error\n"); + return -EINVAL; + } + memset(&cfg, 0, sizeof(cfg)); cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES; ret = dmaengine_slave_config(dd->dma_lch, &cfg); if (ret) { @@ -584,31 +608,12 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, return ret; } - len32 = DIV_ROUND_UP(length, dma_min) * dma_min; - - if (is_sg) { - /* - * The SG entry passed in may not have the 'length' member - * set correctly so use a local SG entry (sgl_tmp) with the - * proper value for 'length' instead. If this is not done, - * the dmaengine may try to DMA the incorrect amount of data. 
- */ - sg_init_table(&ctx->sgl_tmp, 1); - sg_assign_page(&ctx->sgl_tmp, sg_page(ctx->sg)); - ctx->sgl_tmp.offset = ctx->sg->offset; - sg_dma_len(&ctx->sgl_tmp) = len32; - sg_dma_address(&ctx->sgl_tmp) = sg_dma_address(ctx->sg); - - tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl_tmp, 1, - DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - } else { - tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32, - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - } + tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!tx) { - dev_err(dd->dev, "prep_slave_sg/single() failed\n"); + dev_err(dd->dev, "prep_slave_sg failed\n"); return -EINVAL; } @@ -618,6 +623,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, dd->pdata->write_ctrl(dd, length, final, 1); ctx->digcnt += length; + ctx->total -= length; if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ @@ -886,208 +892,13 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) return 0; } -static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, - const u8 *data, size_t length) -{ - size_t count = min(length, ctx->buflen - ctx->bufcnt); - - count = min(count, ctx->total); - if (count <= 0) - return 0; - memcpy(ctx->buffer + ctx->bufcnt, data, count); - ctx->bufcnt += count; - - return count; -} - -static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) -{ - size_t count; - const u8 *vaddr; - - while (ctx->sg) { - vaddr = kmap_atomic(sg_page(ctx->sg)); - vaddr += ctx->sg->offset; - - count = omap_sham_append_buffer(ctx, - vaddr + ctx->offset, - ctx->sg->length - ctx->offset); - - kunmap_atomic((void *)vaddr); - - if (!count) - break; - ctx->offset += count; - ctx->total -= count; - if (ctx->offset == ctx->sg->length) { - ctx->sg = sg_next(ctx->sg); - if (ctx->sg) - ctx->offset = 0; - else - ctx->total = 0; - } - } - - return 0; -} - -static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, - struct omap_sham_reqctx *ctx, - size_t length, int final) -{ - int ret; - - ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, - DMA_TO_DEVICE); - if (dma_mapping_error(dd->dev, ctx->dma_addr)) { - dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); - return -EINVAL; - } - - ctx->flags &= ~BIT(FLAGS_SG); - - ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0); - if (ret != -EINPROGRESS) - dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, - DMA_TO_DEVICE); - - return ret; -} - -static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) -{ - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - unsigned int final; - size_t count; - - omap_sham_append_sg(ctx); - - final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; - - dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", - ctx->bufcnt, ctx->digcnt, final); - - if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { - count = ctx->bufcnt; - ctx->bufcnt = 0; - return omap_sham_xmit_dma_map(dd, ctx, count, final); - } - - return 0; -} - -/* Start address alignment */ -#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) -/* SHA1 block size alignment */ -#define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs)) - -static int omap_sham_update_dma_start(struct omap_sham_dev *dd) -{ - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - unsigned int length, final, tail; - struct scatterlist *sg; - int ret, bs; - - if (!ctx->total) - return 0; - - if (ctx->bufcnt || ctx->offset) 
- return omap_sham_update_dma_slow(dd); - - /* - * Don't use the sg interface when the transfer size is less - * than the number of elements in a DMA frame. Otherwise, - * the dmaengine infrastructure will calculate that it needs - * to transfer 0 frames which ultimately fails. - */ - if (ctx->total < get_block_size(ctx)) - return omap_sham_update_dma_slow(dd); - - dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", - ctx->digcnt, ctx->bufcnt, ctx->total); - - sg = ctx->sg; - bs = get_block_size(ctx); - - if (!SG_AA(sg)) - return omap_sham_update_dma_slow(dd); - - if (!sg_is_last(sg) && !SG_SA(sg, bs)) - /* size is not BLOCK_SIZE aligned */ - return omap_sham_update_dma_slow(dd); - - length = min(ctx->total, sg->length); - - if (sg_is_last(sg)) { - if (!(ctx->flags & BIT(FLAGS_FINUP))) { - /* not last sg must be BLOCK_SIZE aligned */ - tail = length & (bs - 1); - /* without finup() we need one block to close hash */ - if (!tail) - tail = bs; - length -= tail; - } - } - - if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { - dev_err(dd->dev, "dma_map_sg error\n"); - return -EINVAL; - } - - ctx->flags |= BIT(FLAGS_SG); - - ctx->total -= length; - ctx->offset = length; /* offset where to start slow */ - - final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; - - ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); - if (ret != -EINPROGRESS) - dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); - - return ret; -} - -static int omap_sham_update_cpu(struct omap_sham_dev *dd) -{ - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int bufcnt, final; - - if (!ctx->total) - return 0; - - omap_sham_append_sg(ctx); - - final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; - - dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n", - ctx->bufcnt, ctx->digcnt, final); - - if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { - bufcnt = ctx->bufcnt; - ctx->bufcnt = 0; - return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final); - } - - return 0; -} - static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); - if (ctx->flags & BIT(FLAGS_SG)) { - dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); - if (ctx->sg->length == ctx->offset) { - ctx->sg = sg_next(ctx->sg); - if (ctx->sg) - ctx->offset = 0; - } - } else { - dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, - DMA_TO_DEVICE); - } + clear_bit(FLAGS_DMA_ACTIVE, &dd->flags); return 0; } @@ -1148,6 +959,8 @@ static int omap_sham_init(struct ahash_request *req) ctx->bufcnt = 0; ctx->digcnt = 0; + ctx->total = 0; + ctx->offset = 0; ctx->buflen = BUFLEN; if (tctx->flags & BIT(FLAGS_HMAC)) { @@ -1170,14 +983,19 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err; + bool final = ctx->flags & BIT(FLAGS_FINUP); dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); + if (ctx->total < get_block_size(ctx) || + ctx->total < OMAP_SHA_DMA_THRESHOLD) + ctx->flags |= BIT(FLAGS_CPU); + if (ctx->flags & BIT(FLAGS_CPU)) - err = omap_sham_update_cpu(dd); + err = omap_sham_xmit_cpu(dd, ctx->total, final); else - err = omap_sham_update_dma_start(dd); + err = omap_sham_xmit_dma(dd, ctx->total, final); /* wait for dma completion before can take more data */ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, 
ctx->digcnt); @@ -1191,7 +1009,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err = 0, use_dma = 1; - if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) + if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode) /* * faster to handle last block with cpu or * use cpu when dma is not present. @@ -1199,9 +1017,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) use_dma = 0; if (use_dma) - err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); + err = omap_sham_xmit_dma(dd, ctx->total, 1); else - err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); + err = omap_sham_xmit_cpu(dd, ctx->total, 1); ctx->bufcnt = 0; @@ -1249,6 +1067,17 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; + if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) + free_pages((unsigned long)sg_virt(ctx->sg), + get_order(ctx->sg->length)); + + if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) + kfree(ctx->sg); + + ctx->sg = NULL; + + dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED)); + if (!err) { dd->pdata->copy_hash(req, 1); if (test_bit(FLAGS_FINAL, &dd->flags)) @@ -1300,7 +1129,7 @@ retry: dd->req = req; ctx = ahash_request_ctx(req); - err = omap_sham_prepare_request(NULL, ctx->op == OP_UPDATE); + err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE); if (err) goto err1; @@ -1356,34 +1185,15 @@ static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; - int bs = get_block_size(ctx); if (!req->nbytes) return 0; - ctx->total = req->nbytes; - ctx->sg = req->src; - ctx->offset = 0; - - if (ctx->flags & BIT(FLAGS_FINUP)) { - if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) { - /* - * OMAP HW accel works only with buffers >= 9 - * will switch to bypass in final() - * final has the same request and data - */ - omap_sham_append_sg(ctx); - return 0; - } else if ((ctx->bufcnt + ctx->total <= bs) || - dd->polling_mode) { - /* - * faster to use CPU for short transfers or - * use cpu when dma is not present. 
- */ - ctx->flags |= BIT(FLAGS_CPU); - } - } else if (ctx->bufcnt + ctx->total < ctx->buflen) { - omap_sham_append_sg(ctx); + if (ctx->total + req->nbytes < ctx->buflen) { + scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, + 0, req->nbytes, 0); + ctx->bufcnt += req->nbytes; + ctx->total += req->nbytes; return 0; } @@ -1917,12 +1727,8 @@ static void omap_sham_done_task(unsigned long data) } if (test_bit(FLAGS_CPU, &dd->flags)) { - if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { - /* hash or semi-hash ready */ - err = omap_sham_update_cpu(dd); - if (err != -EINPROGRESS) - goto finish; - } + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) + goto finish; } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { omap_sham_update_dma_stop(dd); @@ -1934,8 +1740,6 @@ static void omap_sham_done_task(unsigned long data) if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { /* hash or semi-hash ready */ clear_bit(FLAGS_DMA_READY, &dd->flags); - err = omap_sham_update_dma_start(dd); - if (err != -EINPROGRESS) goto finish; } } -- cgit v1.2.3-58-ga151 From a84d351f6b8624cf3e365971f8eab7af8ef690cf Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:18 +0300 Subject: crypto: omap-sham - add support for export/import Now that the driver has been converted to use scatterlists for data handling, add a proper implementation for the export/import stubs as well. Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 412559e9b2a1..8eefd793cc82 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1418,12 +1418,21 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm) static int omap_sham_export(struct ahash_request *req, void *out) { - return -ENOTSUPP; + struct omap_sham_reqctx *rctx = ahash_request_ctx(req); + + memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt); + + return 0; } static int omap_sham_import(struct ahash_request *req, const void *in) { - return -ENOTSUPP; + struct omap_sham_reqctx *rctx = ahash_request_ctx(req); + const struct omap_sham_reqctx *ctx_in = in; + + memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt); + + return 0; } static struct ahash_alg algs_sha1_md5[] = { @@ -2083,7 +2092,8 @@ static int omap_sham_probe(struct platform_device *pdev) alg = &dd->pdata->algs_info[i].algs_list[j]; alg->export = omap_sham_export; alg->import = omap_sham_import; - alg->halg.statesize = sizeof(struct omap_sham_reqctx); + alg->halg.statesize = sizeof(struct omap_sham_reqctx) + + BUFLEN; err = crypto_register_ahash(alg); if (err) goto err_algs; -- cgit v1.2.3-58-ga151 From 182e283f3e550d70ba5b7e6be66ea74726646166 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 19 Sep 2016 18:22:19 +0300 Subject: crypto: omap-sham - shrink the internal buffer size The current internal buffer size is way too large for the crypto core, so shrink it to be smaller. This makes the buffer fit into the space reserved for the export/import buffers as well.
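The reason the shrink ties directly into export/import: the cached data sits in a flexible array at the tail of the request context, so the exported state is sizeof(reqctx) + BUFLEN bytes, and statesize must cover it. A sketch of that sizing relationship, using an illustrative mirror of the real structure (SHA512_BLOCK_SIZE is assumed from crypto/sha.h):

#include <crypto/sha.h>

#define BUFLEN	SHA512_BLOCK_SIZE	/* 128 bytes, down from PAGE_SIZE */

struct example_reqctx {			/* mirrors omap_sham_reqctx */
	/* ... bookkeeping fields ... */
	size_t bufcnt;			/* bytes currently buffered */
	size_t buflen;			/* capacity, set to BUFLEN */
	u8 buffer[0];			/* BUFLEN bytes allocated behind this */
};

/* what halg.statesize must cover for export/import to be safe: */
static const size_t example_statesize =
	sizeof(struct example_reqctx) + BUFLEN;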
Signed-off-by: Tero Kristo Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 8eefd793cc82..d0b16e5e4ee5 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -135,7 +135,7 @@ #define OMAP_ALIGN_MASK (sizeof(u32)-1) #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) -#define BUFLEN PAGE_SIZE +#define BUFLEN SHA512_BLOCK_SIZE #define OMAP_SHA_DMA_THRESHOLD 256 struct omap_sham_dev; -- cgit v1.2.3-58-ga151 From ad8529fde9e3601180a839867a8ab041109aebb5 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Tue, 20 Sep 2016 10:25:40 -0500 Subject: hwrng: omap - Only fail if pm_runtime_get_sync returns < 0 Currently omap-rng checks the return value of pm_runtime_get_sync and reports failure if anything non-zero is returned. However, it should be checking whether ret < 0: pm_runtime_get_sync returns 0 on success but can also return 1 if the device was already active, which is not a failure case. Only values < 0 are actual failures. Fixes: 61dc0a446e5d ("hwrng: omap - Fix assumption that runtime_get_sync will always succeed") Signed-off-by: Dave Gerlach Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 01d4be2c354b..f5c26a5f6875 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); - if (ret) { + if (ret < 0) { dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); pm_runtime_put_noidle(&pdev->dev); goto err_ioremap; @@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret) { + if (ret < 0) { dev_err(dev, "Failed to runtime_get device: %d\n", ret); pm_runtime_put_noidle(dev); return ret; -- cgit v1.2.3-58-ga151 From 5ecf8ef9103cb018cbd82b6eace529ff4c5b5c66 Mon Sep 17 00:00:00 2001 From: Catalin Vasile Date: Thu, 22 Sep 2016 11:57:58 +0300 Subject: crypto: caam - fix sg dump Ensure scatterlists have a virtual memory mapping before dumping. Signed-off-by: Catalin Vasile Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg.c | 79 +++++++++++++++++++++++++++++++++---------- 1 file changed, 61 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index f1116e7f7cd5..eb97562414d2 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -111,6 +111,42 @@ #else #define debug(format, arg...)
#endif + +#ifdef DEBUG +#include + +static void dbg_dump_sg(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + struct scatterlist *sg, size_t tlen, bool ascii, + bool may_sleep) +{ + struct scatterlist *it; + void *it_page; + size_t len; + void *buf; + + for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) { + /* + * make sure the scatterlist's page + * has a valid virtual memory mapping + */ + it_page = kmap_atomic(sg_page(it)); + if (unlikely(!it_page)) { + printk(KERN_ERR "dbg_dump_sg: kmap failed\n"); + return; + } + + buf = it_page + it->offset; + len = min(tlen, it->length); + print_hex_dump(level, prefix_str, prefix_type, rowsize, + groupsize, buf, len, ascii); + tlen -= len; + + kunmap_atomic(it_page); + } +} +#endif + static struct list_head alg_list; struct caam_alg_entry { @@ -1982,9 +2018,9 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, edesc->src_nents > 1 ? 100 : ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); + dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); #endif ablkcipher_unmap(jrdev, edesc, req); @@ -2014,9 +2050,9 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->dst_nents > 1 ? 100 : req->nbytes, 1); + dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->dst, + edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true); #endif ablkcipher_unmap(jrdev, edesc, req); @@ -2171,12 +2207,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, int len, sec4_sg_index = 0; #ifdef DEBUG + bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->src_nents ? 100 : req->nbytes, 1); + printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes); + dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents ? 100 : req->nbytes, 1, may_sleep); #endif len = desc_len(sh_desc); @@ -2228,12 +2267,14 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, int len, sec4_sg_index = 0; #ifdef DEBUG + bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, req->info, ivsize, 1); - print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - edesc->src_nents ? 100 : req->nbytes, 1); + dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + edesc->src_nents ? 
100 : req->nbytes, 1, may_sleep); #endif len = desc_len(sh_desc); @@ -2503,18 +2544,20 @@ static int aead_decrypt(struct aead_request *req) u32 *desc; int ret = 0; +#ifdef DEBUG + bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) != 0); + dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->src, + req->assoclen + req->cryptlen, 1, may_sleep); +#endif + /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, &all_contig, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); -#ifdef DEBUG - print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), - req->assoclen + req->cryptlen, 1); -#endif - /* Create and submit job descriptor*/ init_authenc_job(req, edesc, all_contig, false); #ifdef DEBUG -- cgit v1.2.3-58-ga151 From 80da44c29d997e28c4442825f35f4ac339813877 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Wed, 28 Sep 2016 13:42:10 -0300 Subject: crypto: vmx - Fix memory corruption caused by p8_ghash This patch changes the p8_ghash driver to use ghash-generic as a fixed fallback implementation. This allows the correct value of descsize to be defined directly in its shash_alg structure and avoids problems with incorrect buffer sizes when its state is exported or imported. Reported-by: Jan Stancek Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module") Cc: stable@vger.kernel.org Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/vmx/ghash.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 6c999cb01b80..27a94a119009 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -26,16 +26,13 @@ #include #include #include +#include #include #include #include #define IN_INTERRUPT in_interrupt() -#define GHASH_BLOCK_SIZE (16) -#define GHASH_DIGEST_SIZE (16) -#define GHASH_KEY_LEN (16) - void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], @@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx { static int p8_ghash_init_tfm(struct crypto_tfm *tfm) { - const char *alg; + const char *alg = "ghash-generic"; struct crypto_shash *fallback; struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR @@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) crypto_shash_set_flags(fallback, crypto_shash_get_flags((struct crypto_shash *) tfm)); - ctx->fallback = fallback; - shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) - + crypto_shash_descsize(fallback); + /* Check if the descsize defined in the algorithm is still enough. 
*/ + if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) + + crypto_shash_descsize(fallback)) { + printk(KERN_ERR + "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", + alg, + shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), + crypto_shash_descsize(fallback)); + return -EINVAL; + } + ctx->fallback = fallback; return 0; } @@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, { struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); - if (keylen != GHASH_KEY_LEN) + if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; preempt_disable(); @@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = { .update = p8_ghash_update, .final = p8_ghash_final, .setkey = p8_ghash_setkey, - .descsize = sizeof(struct p8_ghash_desc_ctx), + .descsize = sizeof(struct p8_ghash_desc_ctx) + + sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", -- cgit v1.2.3-58-ga151 From 3387879524ec07fd9ba371eddd17e717abdd5e4f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 28 Sep 2016 16:01:42 +0530 Subject: crypto: caam - Unmap region obtained by of_iomap Free memory mapping, if probe is not successful. Signed-off-by: Arvind Yadav Signed-off-by: Herbert Xu --- drivers/crypto/caam/jr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 320228875e9a..757c27f9953d 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -506,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev) error = caam_jr_init(jrdev); /* now turn on hardware */ if (error) { irq_dispose_mapping(jrpriv->irq); + iounmap(ctrl); return error; } -- cgit v1.2.3-58-ga151 From 8df4f27c04c0e061b4b2c142bfbae1602bb1b776 Mon Sep 17 00:00:00 2001 From: Marcelo Cerri Date: Wed, 28 Sep 2016 13:42:11 -0300 Subject: crypto: vmx - Ensure ghash-generic is enabled Select CRYPTO_GHASH for vmx_crypto since p8_ghash uses it as the fallback implementation. Signed-off-by: Marcelo Cerri Signed-off-by: Herbert Xu --- drivers/crypto/vmx/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index a83ead109d5f..c3d524ea6998 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -1,6 +1,7 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" depends on CRYPTO_DEV_VMX + select CRYPTO_GHASH default m help Support for VMX cryptographic acceleration instructions on Power8 CPU. -- cgit v1.2.3-58-ga151 From 9ddb9dc6be095ebe393f7eb582df09cc4847c5e9 Mon Sep 17 00:00:00 2001 From: Gary R Hook Date: Wed, 28 Sep 2016 11:53:47 -0500 Subject: crypto: ccp - clean up data structure Change names of data structure instances. Add const keyword where appropriate. Add error handling path. 
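The added error path follows the usual unwind discipline: each new setup step gets a label that releases it, and labels run in reverse order of initialisation. A condensed sketch of the pattern in the ccp5_init() tail above (abbreviated to the two relevant steps):

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;		/* undo the rng registration first */

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);
e_kthread:
	/* stop the per-queue kthreads started earlier in init */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);
	return ret;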
From 3387879524ec07fd9ba371eddd17e717abdd5e4f Mon Sep 17 00:00:00 2001
From: Arvind Yadav
Date: Wed, 28 Sep 2016 16:01:42 +0530
Subject: crypto: caam - Unmap region obtained by of_iomap

Free the memory mapping if the probe is not successful.

Signed-off-by: Arvind Yadav
Signed-off-by: Herbert Xu
---
 drivers/crypto/caam/jr.c | 1 +
 1 file changed, 1 insertion(+)
(limited to 'drivers')

diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 320228875e9a..757c27f9953d 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -506,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev)
 	error = caam_jr_init(jrdev); /* now turn on hardware */
 	if (error) {
 		irq_dispose_mapping(jrpriv->irq);
+		iounmap(ctrl);
 		return error;
 	}
-- cgit v1.2.3-58-ga151

From 8df4f27c04c0e061b4b2c142bfbae1602bb1b776 Mon Sep 17 00:00:00 2001
From: Marcelo Cerri
Date: Wed, 28 Sep 2016 13:42:11 -0300
Subject: crypto: vmx - Ensure ghash-generic is enabled

Select CRYPTO_GHASH for vmx_crypto since p8_ghash uses it as the fallback
implementation.

Signed-off-by: Marcelo Cerri
Signed-off-by: Herbert Xu
---
 drivers/crypto/vmx/Kconfig | 1 +
 1 file changed, 1 insertion(+)
(limited to 'drivers')

diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index a83ead109d5f..c3d524ea6998 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -1,6 +1,7 @@
 config CRYPTO_DEV_VMX_ENCRYPT
 	tristate "Encryption acceleration support on P8 CPU"
 	depends on CRYPTO_DEV_VMX
+	select CRYPTO_GHASH
 	default m
 	help
 	  Support for VMX cryptographic acceleration instructions on Power8 CPU.
-- cgit v1.2.3-58-ga151

From 9ddb9dc6be095ebe393f7eb582df09cc4847c5e9 Mon Sep 17 00:00:00 2001
From: Gary R Hook
Date: Wed, 28 Sep 2016 11:53:47 -0500
Subject: crypto: ccp - clean up data structure

Change the names of the data structure instances. Add the const keyword
where appropriate. Add an error handling path.

Signed-off-by: Gary R Hook
Signed-off-by: Herbert Xu
---
 drivers/crypto/ccp/ccp-dev-v3.c | 2 +-
 drivers/crypto/ccp/ccp-dev-v5.c | 9 ++++++---
 drivers/crypto/ccp/ccp-dev.h    | 6 +++---
 drivers/crypto/ccp/ccp-pci.c    | 4 ++--
 4 files changed, 12 insertions(+), 9 deletions(-)
(limited to 'drivers')

diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 578522d8f22e..b6615b15a3c2 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -566,7 +566,7 @@ static const struct ccp_actions ccp3_actions = {
 	.irqhandler = ccp_irq_handler,
 };
 
-struct ccp_vdata ccpv3 = {
+const struct ccp_vdata ccpv3 = {
 	.version = CCP_VERSION(3, 0),
 	.setup = NULL,
 	.perform = &ccp3_actions,
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index f499e34df389..a90ca9e67ade 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -835,10 +835,13 @@ static int ccp5_init(struct ccp_device *ccp)
 	/* Register the DMA engine support */
 	ret = ccp_dmaengine_register(ccp);
 	if (ret)
-		goto e_kthread;
+		goto e_hwrng;
 
 	return 0;
 
+e_hwrng:
+	ccp_unregister_rng(ccp);
+
 e_kthread:
 	for (i = 0; i < ccp->cmd_q_count; i++)
 		if (ccp->cmd_q[i].kthread)
@@ -994,7 +997,7 @@ static const struct ccp_actions ccp5_actions = {
 	.irqhandler = ccp5_irq_handler,
 };
 
-struct ccp_vdata ccpv5 = {
+const struct ccp_vdata ccpv5a = {
 	.version = CCP_VERSION(5, 0),
 	.setup = ccp5_config,
 	.perform = &ccp5_actions,
@@ -1002,7 +1005,7 @@ struct ccp_vdata ccpv5 = {
 	.offset = 0x0,
 };
 
-struct ccp_vdata ccpv5other = {
+const struct ccp_vdata ccpv5b = {
 	.version = CCP_VERSION(5, 0),
 	.setup = ccp5other_config,
 	.perform = &ccp5_actions,
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index ebc93652833b..08f58b0d4271 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -639,8 +639,8 @@ struct ccp_vdata {
 	const unsigned int offset;
 };
 
-extern struct ccp_vdata ccpv3;
-extern struct ccp_vdata ccpv5;
-extern struct ccp_vdata ccpv5other;
+extern const struct ccp_vdata ccpv3;
+extern const struct ccp_vdata ccpv5a;
+extern const struct ccp_vdata ccpv5b;
 
 #endif
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 239cbf2630bf..28a9996c1085 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -325,8 +325,8 @@ static int ccp_pci_resume(struct pci_dev *pdev)
 static const struct pci_device_id ccp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
-	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 },
-	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5other },
+	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
+	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
 	/* Last entry must be zero */
 	{ 0, }
 };
-- cgit v1.2.3-58-ga151
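
The new e_hwrng label above follows the standard kernel unwind pattern:
each failure jumps to a label that undoes the most recently completed step,
then falls through to the labels for the earlier steps. A generic sketch of
the pattern (all step functions are hypothetical stand-ins for the kthread,
hwrng and dmaengine registration in ccp5_init()):

	static int setup_queues(void) { return 0; }	/* stand-ins */
	static int setup_hwrng(void)  { return 0; }
	static int setup_dma(void)    { return 0; }
	static void teardown_hwrng(void)  { }
	static void teardown_queues(void) { }

	static int init_sketch(void)
	{
		int ret;

		ret = setup_queues();
		if (ret)
			return ret;
		ret = setup_hwrng();
		if (ret)
			goto e_queues;
		ret = setup_dma();
		if (ret)
			goto e_hwrng;	/* newest label first */
		return 0;

	e_hwrng:
		teardown_hwrng();
	e_queues:
		teardown_queues();
		return ret;
	}
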
From 81422badb39078fde1ffcecda3caac555226fc7b Mon Sep 17 00:00:00 2001
From: Gary R Hook
Date: Wed, 28 Sep 2016 11:53:56 -0500
Subject: crypto: ccp - Make syslog errors human-readable

Add human-readable strings to log messages about CCP errors.

Signed-off-by: Gary R Hook
Signed-off-by: Herbert Xu
---
 drivers/crypto/ccp/ccp-dev-v3.c |  3 +++
 drivers/crypto/ccp/ccp-dev-v5.c |  3 +++
 drivers/crypto/ccp/ccp-dev.c    | 53 +++++++++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp-dev.h    |  2 ++
 4 files changed, 61 insertions(+)
(limited to 'drivers')

diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index b6615b15a3c2..8d2dbacc6161 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -124,6 +124,9 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
 			/* On error delete all related jobs from the queue */
 			cmd = (cmd_q->id << DEL_Q_ID_SHIFT) | op->jobid;
 
+			if (cmd_q->cmd_error)
+				ccp_log_error(cmd_q->ccp,
+					      cmd_q->cmd_error);
 			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
 
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index a90ca9e67ade..faf3cb3ddce2 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -243,6 +243,9 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
 	ret = wait_event_interruptible(cmd_q->int_queue, cmd_q->int_rcvd);
 	if (ret || cmd_q->cmd_error) {
+		if (cmd_q->cmd_error)
+			ccp_log_error(cmd_q->ccp,
+				      cmd_q->cmd_error);
 		/* A version 5 device doesn't use Job IDs... */
 		if (!ret)
 			ret = -EIO;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 5d36eeff6d26..cafa633aae10 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -40,6 +40,59 @@ struct ccp_tasklet_data {
 	struct ccp_cmd *cmd;
 };
 
+/* Human-readable error strings */
+char *ccp_error_codes[] = {
+	"",
+	"ERR 01: ILLEGAL_ENGINE",
+	"ERR 02: ILLEGAL_KEY_ID",
+	"ERR 03: ILLEGAL_FUNCTION_TYPE",
+	"ERR 04: ILLEGAL_FUNCTION_MODE",
+	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+	"ERR 06: ILLEGAL_FUNCTION_SIZE",
+	"ERR 07: Zlib_MISSING_INIT_EOM",
+	"ERR 08: ILLEGAL_FUNCTION_RSVD",
+	"ERR 09: ILLEGAL_BUFFER_LENGTH",
+	"ERR 10: VLSB_FAULT",
+	"ERR 11: ILLEGAL_MEM_ADDR",
+	"ERR 12: ILLEGAL_MEM_SEL",
+	"ERR 13: ILLEGAL_CONTEXT_ID",
+	"ERR 14: ILLEGAL_KEY_ADDR",
+	"ERR 15: 0xF Reserved",
+	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+	"ERR 18: CMD_TIMEOUT",
+	"ERR 19: IDMA0_AXI_SLVERR",
+	"ERR 20: IDMA0_AXI_DECERR",
+	"ERR 21: 0x15 Reserved",
+	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
+	"ERR 23: IDMA1_AXI_DECERR",
+	"ERR 24: 0x18 Reserved",
+	"ERR 25: ZLIBVHB_AXI_SLVERR",
+	"ERR 26: ZLIBVHB_AXI_DECERR",
+	"ERR 27: 0x1B Reserved",
+	"ERR 28: ZLIB_UNEXPECTED_EOM",
+	"ERR 29: ZLIB_EXTRA_DATA",
+	"ERR 30: ZLIB_BTYPE",
+	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
+	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
+	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
+	"ERR 36: ZLIB_LIMIT_REACHED",
+	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+	"ERR 38: ODMA0_AXI_SLVERR",
+	"ERR 39: ODMA0_AXI_DECERR",
+	"ERR 40: 0x28 Reserved",
+	"ERR 41: ODMA1_AXI_SLVERR",
+	"ERR 42: ODMA1_AXI_DECERR",
+	"ERR 43: LSB_PARITY_ERR",
+};
+
+void ccp_log_error(struct ccp_device *d, int e)
+{
+	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+}
+
 /* List of CCPs, CCP count, read-write access lock, and access functions
  *
  * Lock structure: get ccp_unit_lock for reading whenever we need to
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 08f58b0d4271..da5f4a678083 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -601,6 +601,8 @@ void ccp_platform_exit(void);
 void ccp_add_device(struct ccp_device *ccp);
 void ccp_del_device(struct ccp_device *ccp);
 
+extern void ccp_log_error(struct ccp_device *, int);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 bool ccp_queues_suspended(struct ccp_device *ccp);
 int ccp_cmd_queue_thread(void *data);
-- cgit v1.2.3-58-ga151
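
ccp_log_error() above indexes ccp_error_codes[] directly with the code
reported by the hardware. A defensive variant would bounds-check the index
first; a sketch (the range check is an addition here, not part of the
patch, and assumes the ccp_error_codes[] table above is in scope):

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const char *ccp_error_str(unsigned int e)
	{
		if (e >= ARRAY_SIZE(ccp_error_codes))
			return "Unknown CCP error";
		return ccp_error_codes[e];
	}
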
From f97581cfa6e7db9818520597b8a44f8268d75013 Mon Sep 17 00:00:00 2001
From: Tudor Ambarus
Date: Fri, 30 Sep 2016 12:09:39 +0300
Subject: crypto: caam - treat SGT address pointer as u64

Even on i.MX, CAAM is able to use address pointers greater than 32 bits,
since the address pointer field is interpreted as a double word. Enforce a
u64 address pointer in the sec4_sg_entry struct.

This patch fixes the SGT address pointer endianness issue for 32-bit
platforms where core endianness != CAAM endianness.

Signed-off-by: Tudor Ambarus
Signed-off-by: Herbert Xu
---
 drivers/crypto/caam/desc.h       | 6 ------
 drivers/crypto/caam/regs.h       | 8 ++++++++
 drivers/crypto/caam/sg_sw_sec4.h | 2 +-
 3 files changed, 9 insertions(+), 7 deletions(-)
(limited to 'drivers')

diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 26427c11ad87..513b6646bb36 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -23,13 +23,7 @@
 #define SEC4_SG_OFFSET_MASK	0x00001fff
 
 struct sec4_sg_entry {
-#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
-	defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
-	u32 rsvd1;
-	dma_addr_t ptr;
-#else
 	u64 ptr;
-#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
 	u32 len;
 	u32 bpid_offset;
 };
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index b3c5016f6458..84d2f838a063 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -196,6 +196,14 @@ static inline u64 rd_reg64(void __iomem *reg)
 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
 #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
 
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#define cpu_to_caam_dma64(value) \
+		(((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+		 (u64)cpu_to_caam32(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
+#endif
+
 /*
  * jr_outentry
  * Represents each entry in a JobR output ring
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 19dc64fede0d..41cd5a356d05 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,7 @@ struct sec4_sg_entry;
 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
 				      dma_addr_t dma, u32 len, u16 offset)
 {
-	sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+	sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
 	sec4_sg_ptr->len = cpu_to_caam32(len);
 	sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
 #ifdef DEBUG
-- cgit v1.2.3-58-ga151
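
The regs.h hunk above is the heart of the fix: on i.MX the SGT pointer is a
single double word, but the two 32-bit halves must each be converted with
cpu_to_caam32() and then swap positions within the u64 so that CAAM reads
the pointer correctly. A sketch of the same word construction
(cpu_to_caam32() is shown as a placeholder identity here, since its real
definition depends on the platform endianness configuration in regs.h):

	#include <linux/types.h>

	static inline u32 sketch_cpu_to_caam32(u32 v) { return v; } /* placeholder */

	static inline u64 sketch_cpu_to_caam_dma64(u64 value)
	{
		u32 lo = (u32)(value & 0xffffffffULL);	/* lower_32_bits() */
		u32 hi = (u32)(value >> 32);		/* upper_32_bits() */

		/* Converted low half lands in the most-significant word. */
		return ((u64)sketch_cpu_to_caam32(lo) << 32) |
			(u64)sketch_cpu_to_caam32(hi);
	}
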