// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
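
/*
 * Note: CRYPTO_ENGINE_MAX_QLEN bounds the default software queue created by
 * crypto_engine_alloc_init(). Once the queue is full, crypto_enqueue_request()
 * rejects further requests with -ENOSPC, unless the submitter set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, in which case the request is backlogged and
 * -EBUSY is returned.
 */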

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

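	/*
	 * Completing a request may have freed up room in the hardware or
	 * the software queue, so kick the pump to look for more work.
	 */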
	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

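	/*
	 * Notify the owner of a previously backlogged request that it has
	 * now entered the queue proper and is being processed.
	 */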
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if the
	 * hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

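	/*
	 * Note: the enqueue status (-EINPROGRESS on success, -EBUSY for a
	 * backlogged request, -ENOSPC when the queue is full) is passed
	 * straight back to the submitter.
	 */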
	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued into the engine
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
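
/*
 * Typical driver-side usage (an illustrative sketch only; struct my_ctx
 * and its engine member are hypothetical): a driver's skcipher .encrypt
 * callback usually just hands the request to the engine and returns the
 * queueing status to the crypto API:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *								  req);
 *	}
 *
 * The actual processing then happens asynchronously in the tfm's
 * do_one_request() callback, invoked from crypto_pump_requests().
 */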

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
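
/*
 * Completion side (an illustrative sketch; struct my_dev and its fields
 * are hypothetical): once the hardware signals that it is done, typically
 * from the driver's interrupt handler or a tasklet, the driver finalizes
 * the request, which runs the request's completion callback and pumps the
 * next one:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *		int err = my_read_status(dd);
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *		return IRQ_HANDLED;
 *	}
 */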

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while (up to 500 * 20 ms = 10 seconds) for the queued
	 * requests to be pumped out.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
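
/*
 * Engine lifecycle (an illustrative sketch; my_probe(), my_remove() and
 * struct my_dev are hypothetical): a driver normally allocates and starts
 * the engine at probe time and tears it down on remove:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(dd->engine);
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 *
 * crypto_engine_exit() stops the engine first, so no separate
 * crypto_engine_stop() call is needed on the remove path.
 */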

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");