Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions drivers/crypto/qce/aead.c
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,10 @@ static void qce_aead_done(void *data)
sg_free_table(&rctx->dst_tbl);
}

error = qce_bam_unlock(qce);
if (error)
dev_err(qce->dev, "aead: failed to unlock the BAM\n");

error = qce_check_status(qce, &status);
if (error < 0 && (error != -EBADMSG))
dev_err(qce->dev, "aead operation error (%x)\n", status);
Expand Down Expand Up @@ -188,6 +192,8 @@ qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct qce_aead_reqctx *rctx = aead_request_ctx_dma(req);
struct qce_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
struct qce_device *qce = tmpl->qce;
unsigned int assoclen = rctx->assoclen;
unsigned int adata_header_len, cryptlen, totallen;
gfp_t gfp;
Expand All @@ -200,6 +206,10 @@ qce_aead_ccm_prepare_buf_assoclen(struct aead_request *req)
cryptlen = rctx->cryptlen;
totallen = cryptlen + req->assoclen;

ret = qce_bam_lock(qce);
if (ret)
return ret;

/* Get the msg */
msg_sg = scatterwalk_ffwd(__sg, req->src, req->assoclen);

Expand Down
39 changes: 28 additions & 11 deletions drivers/crypto/qce/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include "cipher.h"
#include "common.h"
#include "core.h"
#include "dma.h"
#include "regs-v5.h"
#include "sha.h"
#include "aead.h"
Expand All @@ -25,7 +26,7 @@ static inline u32 qce_read(struct qce_device *qce, u32 offset)

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
writel(val, qce->base + offset);
qce_write_dma(qce, offset, val);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
Expand Down Expand Up @@ -82,6 +83,8 @@ static void qce_setup_config(struct qce_device *qce)
{
u32 config;

qce_clear_bam_transaction(qce);

/* get big endianness */
config = qce_config_reg(qce, 0);

Expand All @@ -90,12 +93,14 @@ static void qce_setup_config(struct qce_device *qce)
qce_write(qce, REG_CONFIG, config);
}

static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
static int qce_crypto_go(struct qce_device *qce, bool result_dump)
{
if (result_dump)
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
else
qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));

return qce_submit_cmd_desc(qce);
}

#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
Expand Down Expand Up @@ -223,9 +228,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);

qce_crypto_go(qce, true);

return 0;
return qce_crypto_go(qce, true);
}
#endif

Expand Down Expand Up @@ -386,9 +389,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
config = qce_config_reg(qce, 1);
qce_write(qce, REG_CONFIG, config);

qce_crypto_go(qce, true);

return 0;
return qce_crypto_go(qce, true);
}
#endif

Expand Down Expand Up @@ -535,9 +536,7 @@ static int qce_setup_regs_aead(struct crypto_async_request *async_req)
qce_write(qce, REG_CONFIG, config);

/* Start the process */
qce_crypto_go(qce, !IS_CCM(flags));

return 0;
return qce_crypto_go(qce, !IS_CCM(flags));
}
#endif

Expand Down Expand Up @@ -593,3 +592,21 @@ void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}

/*
 * qce_bam_lock() - take exclusive ownership of the crypto BAM pipe.
 * @qce: crypto engine device
 *
 * Resets any pending command-descriptor transaction, queues a dummy
 * register write (REG_VERSION with value 0) so the descriptor is
 * non-empty, and submits it through the lock-acquiring descriptor path.
 * Pair with qce_bam_unlock().
 *
 * Return: 0 on success, or a negative error code from
 * qce_submit_cmd_desc_lock().
 */
int qce_bam_lock(struct qce_device *qce)
{
	qce_clear_bam_transaction(qce);
	/* Dummy write to acquire the lock on the BAM pipe. */
	qce_write(qce, REG_VERSION, 0);

	return qce_submit_cmd_desc_lock(qce);
}

/*
 * qce_bam_unlock() - release ownership of the crypto BAM pipe.
 * @qce: crypto engine device
 *
 * Counterpart to qce_bam_lock(): resets the pending command-descriptor
 * transaction, queues a dummy write to REG_VERSION so the descriptor is
 * non-empty, and submits it through the lock-releasing descriptor path.
 *
 * Return: 0 on success, or a negative error code from
 * qce_submit_cmd_desc_unlock().
 */
int qce_bam_unlock(struct qce_device *qce)
{
	qce_clear_bam_transaction(qce);
	/* Dummy write to release the lock on the BAM pipe. */
	qce_write(qce, REG_VERSION, 0);

	return qce_submit_cmd_desc_unlock(qce);
}
113 changes: 98 additions & 15 deletions drivers/crypto/qce/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
Expand Down Expand Up @@ -90,6 +92,11 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *async_req, *backlog;
int ret = 0, err;

ACQUIRE(pm_runtime_active_try, pm)(qce->dev);
ret = ACQUIRE_ERR(pm_runtime_active_auto_try, &pm);
if (ret)
return ret;

scoped_guard(mutex, &qce->lock) {
if (req)
ret = crypto_enqueue_request(&qce->queue, req);
Expand Down Expand Up @@ -186,10 +193,19 @@ static int qce_check_version(struct qce_device *qce)
return 0;
}

/*
 * qce_crypto_unmap_dma() - devm action callback undoing dma_map_resource().
 * @data: opaque pointer to the struct qce_device being torn down
 *
 * Unmaps the register region described by @qce->base_dma/@qce->dma_size
 * that probe mapped for bidirectional DMA.
 */
static void qce_crypto_unmap_dma(void *data)
{
	struct qce_device *qce = data;

	dma_unmap_resource(qce->dev, qce->base_dma, qce->dma_size,
			   DMA_BIDIRECTIONAL, 0);
}

static int qce_crypto_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qce_device *qce;
struct resource *res;
int ret;

qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
Expand All @@ -199,35 +215,46 @@ static int qce_crypto_probe(struct platform_device *pdev)
qce->dev = dev;
platform_set_drvdata(pdev, qce);

qce->base = devm_platform_ioremap_resource(pdev, 0);
qce->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(qce->base))
return PTR_ERR(qce->base);

ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;

qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
/* PM clock helpers: register device clocks */
ret = devm_pm_clk_create(dev);
if (ret)
return ret;

ret = pm_clk_add(dev, "core");
if (ret)
return ret;

qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
ret = pm_clk_add(dev, "iface");
if (ret)
return ret;

qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
ret = pm_clk_add(dev, "bus");
if (ret)
return ret;

qce->mem_path = devm_of_icc_get(qce->dev, "memory");
qce->mem_path = devm_of_icc_get(dev, "memory");
if (IS_ERR(qce->mem_path))
return PTR_ERR(qce->mem_path);

ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH, QCE_DEFAULT_MEM_BANDWIDTH);
/* Enable runtime PM after clocks and ICC are acquired */
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;

ret = devm_qce_dma_request(qce->dev, &qce->dma);
ACQUIRE(pm_runtime_active_try, pm)(dev);
ret = ACQUIRE_ERR(pm_runtime_active_auto_try, &pm);
if (ret)
return ret;

ret = devm_qce_dma_request(qce);
if (ret)
return ret;

Expand All @@ -245,9 +272,64 @@ static int qce_crypto_probe(struct platform_device *pdev)
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;

return devm_qce_register_algs(qce);
ret = devm_qce_register_algs(qce);
if (ret)
return ret;

qce->dma_size = resource_size(res);
qce->base_dma = dma_map_resource(dev, res->start, qce->dma_size,
DMA_BIDIRECTIONAL, 0);
qce->base_phys = res->start;
ret = dma_mapping_error(dev, qce->base_dma);
if (ret)
return ret;

ret = devm_add_action_or_reset(qce->dev, qce_crypto_unmap_dma, qce);
if (ret)
return ret;

/* Configure autosuspend after successful init */
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_mark_last_busy(dev);

return 0;
}

/*
 * qce_runtime_suspend() - runtime PM suspend handler.
 * @dev: qce platform device
 *
 * Disables the "memory" interconnect path and then gates the device
 * clocks registered with the PM clock framework.
 *
 * NOTE(review): the icc_disable() return value is ignored — confirm a
 * failure here is harmless before relying on this path.
 *
 * Return: result of pm_clk_suspend().
 */
static int __maybe_unused qce_runtime_suspend(struct device *dev)
{
	struct qce_device *qce = dev_get_drvdata(dev);

	icc_disable(qce->mem_path);

	return pm_clk_suspend(dev);
}

/*
 * qce_runtime_resume() - runtime PM resume handler.
 * @dev: qce platform device
 *
 * Ungates the PM clocks, then restores the default bandwidth vote on the
 * "memory" interconnect path. If the bandwidth vote fails, the clocks
 * are gated again so the device is left fully suspended.
 *
 * Return: 0 on success, or a negative error code.
 */
static int __maybe_unused qce_runtime_resume(struct device *dev)
{
	struct qce_device *qce = dev_get_drvdata(dev);
	int ret;

	ret = pm_clk_resume(dev);
	if (ret)
		return ret;

	ret = icc_set_bw(qce->mem_path, QCE_DEFAULT_MEM_BANDWIDTH,
			 QCE_DEFAULT_MEM_BANDWIDTH);
	if (ret) {
		/* Roll back the clock enable before reporting failure. */
		pm_clk_suspend(dev);
		return ret;
	}

	return 0;
}

/*
 * PM callbacks: runtime PM uses the clock/interconnect handlers defined
 * in this file; system sleep is routed through the runtime PM force
 * helpers so both paths share the same suspend/resume logic.
 */
static const struct dev_pm_ops qce_crypto_pm_ops = {
	SET_RUNTIME_PM_OPS(qce_runtime_suspend, qce_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};

static const struct of_device_id qce_crypto_of_match[] = {
{ .compatible = "qcom,crypto-v5.1", },
{ .compatible = "qcom,crypto-v5.4", },
Expand All @@ -261,6 +343,7 @@ static struct platform_driver qce_crypto_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
.pm = &qce_crypto_pm_ops,
},
};
module_platform_driver(qce_crypto_driver);
Expand Down
11 changes: 11 additions & 0 deletions drivers/crypto/qce/core.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <crypto/algapi.h>

#include "dma.h"

Expand All @@ -26,6 +27,11 @@
* @dma: pointer to dma data
* @burst_size: the crypto burst size
* @pipe_pair_id: which pipe pair id the device using
* @base_dma: base DMA address
* @base_phys: base physical address
* @dma_size: size of memory mapped for DMA
* @read_buf: Buffer for DMA to write back to
* @read_buf_dma: Mapped address of the read buffer
* @async_req_enqueue: invoked by every algorithm to enqueue a request
* @async_req_done: invoked by every algorithm to finish its request
*/
Expand All @@ -42,6 +48,11 @@ struct qce_device {
struct qce_dma_data dma;
int burst_size;
unsigned int pipe_pair_id;
dma_addr_t base_dma;
phys_addr_t base_phys;
size_t dma_size;
__le32 *read_buf;
dma_addr_t read_buf_dma;
int (*async_req_enqueue)(struct qce_device *qce,
struct crypto_async_request *req);
void (*async_req_done)(struct qce_device *qce, int ret);
Expand Down
Loading