/*
 * RNG driver for AMD RNGs
 *
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * with the majority of the code coming from:
 *
 * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
 * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
 *
 * derived from
 *
 * Hardware driver for the AMD 768 Random Number Generator (RNG)
 * (c) Copyright 2001 Red Hat Inc
 *
 * derived from
 *
 * Hardware driver for Intel i810 Random Number Generator (RNG)
 * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
 * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#define DRV_NAME "AMD768-HWRNG"

#define RNGDATA 0x00
#define RNGDONE 0x04
#define PMBASE_OFFSET 0xF0
#define PMBASE_SIZE 8

/*
 * Data for PCI driver interface
 *
 * This data only exists for exporting the supported
 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
 * register a pci_driver, because someone else might one day
 * want to register another driver on the same PCI id.
 */
static const struct pci_device_id pci_tbl[] = {
        { PCI_VDEVICE(AMD, 0x7443), 0, },
        { PCI_VDEVICE(AMD, 0x746b), 0, },
        { 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);

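/*
 * Per-device state: the ioport-mapped RNG registers, the matched PCI
 * device, and the power management I/O base they were found at.
 */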
struct amd768_priv {
        void __iomem *iobase;
        struct pci_dev *pcidev;
        u32 pmbase;
};

static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        u32 *data = buf;
        struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
        size_t read = 0;
        /* Allow roughly one wait per 32-bit word requested */
        int timeout = max / 4 + 1;

        /*
         * RNG data is available when RNGDONE is set to 1.
         * New random numbers are generated approximately 128 microseconds
         * after RNGDATA is read.
         */
        while (read < max) {
                if (ioread32(priv->iobase + RNGDONE) == 0) {
                        if (wait) {
                                /* Delay given by datasheet */
                                usleep_range(128, 196);
                                if (timeout-- == 0)
                                        return read;
                        } else {
                                return 0;
                        }
                } else {
                        *data = ioread32(priv->iobase + RNGDATA);
                        data++;
                        read += 4;
                }
        }

        return read;
}

static int amd_rng_init(struct hwrng *rng)
{
        struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
        u8 rnen;

        pci_read_config_byte(priv->pcidev, 0x40, &rnen);
        rnen |= BIT(7); /* RNG on */
        pci_write_config_byte(priv->pcidev, 0x40, rnen);

        pci_read_config_byte(priv->pcidev, 0x41, &rnen);
        rnen |= BIT(7); /* PMIO enable */
        pci_write_config_byte(priv->pcidev, 0x41, rnen);

        return 0;
}

static void amd_rng_cleanup(struct hwrng *rng)
{
        struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
        u8 rnen;

        pci_read_config_byte(priv->pcidev, 0x40, &rnen);
        rnen &= ~BIT(7); /* RNG off */
        pci_write_config_byte(priv->pcidev, 0x40, rnen);
}

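/*
 * hwrng interface for the core; .priv is set in mod_init() to point at
 * our struct amd768_priv before the device is registered.
 */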
static struct hwrng amd_rng = {
        .name = "amd",
        .init = amd_rng_init,
        .cleanup = amd_rng_cleanup,
        .read = amd_rng_read,
};

static int __init mod_init(void)
{
        int err = -ENODEV;
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *ent;
        u32 pmbase;
        struct amd768_priv *priv;

        for_each_pci_dev(pdev) {
                ent = pci_match_id(pci_tbl, pdev);
                if (ent)
                        goto found;
        }
        /* Device not found. */
        return -ENODEV;

found:
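        /*
         * The RNG registers live at offset PMBASE_OFFSET inside the
         * chipset's power management I/O block; PCI config dword 0x58
         * holds that block's base address (bits 15:8 are kept below).
         */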
        err = pci_read_config_dword(pdev, 0x58, &pmbase);
        if (err)
                return err;

        pmbase &= 0x0000FF00;
        if (pmbase == 0)
                return -EIO;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
                dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
                        pmbase + PMBASE_OFFSET);
                err = -EBUSY;
                goto out;
        }

        priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
        if (!priv->iobase) {
                pr_err(DRV_NAME " Cannot map ioport\n");
                err = -EINVAL;
                goto err_iomap;
        }

        amd_rng.priv = (unsigned long)priv;
        priv->pmbase = pmbase;
        priv->pcidev = pdev;

        pr_info(DRV_NAME " detected\n");
        err = hwrng_register(&amd_rng);
        if (err) {
                pr_err(DRV_NAME " registering failed (%d)\n", err);
                goto err_hwrng;
        }
        return 0;

err_hwrng:
        ioport_unmap(priv->iobase);
err_iomap:
        release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
out:
        kfree(priv);
        return err;
}

static void __exit mod_exit(void)
{
        struct amd768_priv *priv;

        priv = (struct amd768_priv *)amd_rng.priv;

        hwrng_unregister(&amd_rng);

        ioport_unmap(priv->iobase);

        release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);

        kfree(priv);
}

module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("The Linux Kernel team");
MODULE_DESCRIPTION("H/W RNG driver for AMD chipsets");
MODULE_LICENSE("GPL");