// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +			\
			   CAAM_CMD_SZ +			\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* rng per-device context */
struct caam_rng_ctx {
	struct hwrng rng;
	struct device *jrdev;
	struct device *ctrldev;
	void *desc_async;
	void *desc_sync;
	struct work_struct worker;
	struct kfifo fifo;
};

struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}

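/*
 * Job ring completion callback: translate a non-zero CAAM status word into
 * an errno for the waiting submitter, then signal its completion.
 */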
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}

static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

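/*
 * Run one RNG job synchronously: map the destination buffer for DMA,
 * enqueue a descriptor built by caam_init_desc() and sleep until
 * caam_rng_done() fires. Returns the number of bytes produced (always
 * CAAM_RNG_MAX_FIFO_STORE_SIZE) or a negative errno.
 */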
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err = &ret,
	};

	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}

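/*
 * Refill the kfifo with fresh random bytes: expose the fifo's free space
 * through a one-entry scatterlist, fill it with a synchronous RNG job and
 * commit the produced bytes with kfifo_dma_in_finish().
 */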
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}

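/*
 * hwrng ->read() hook. When the caller may sleep, run a blocking RNG job
 * straight into the caller's buffer; otherwise serve the request from the
 * kfifo and schedule the worker to refill it once it runs empty.
 */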
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

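/*
 * hwrng ->cleanup() hook: wait for any pending refill work, then release
 * the job ring and the fifo backing store.
 */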
static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}

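/*
 * hwrng ->init() hook: allocate the two job descriptors (one for blocking
 * reads, one for the async refill path) and the kfifo, grab a job ring and
 * prime the fifo so early non-blocking reads already find data.
 */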
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
			GFP_DMA | GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill async buffer to have early randomness data for
	 * hw_random
	 */
	caam_rng_fill_async(ctx);

	return 0;
}

int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}

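/*
 * Controller probe-time entry point: register the hwrng only if the
 * controller reports an instantiated RNG block. All resources are opened
 * in a devres group keyed on caam_rng_init so caam_rng_exit() can release
 * them in one go.
 */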
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name = "rng-caam";
	ctx->rng.init = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read = caam_read;
	ctx->rng.priv = (unsigned long)ctx;
	ctx->rng.quality = 1024;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}