^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
/* Which ioremap*() variant __devm_ioremap() should use for the mapping. */
enum devm_ioremap_type {
	DEVM_IOREMAP = 0,	/* plain ioremap() */
	DEVM_IOREMAP_UC,	/* uncached: ioremap_uc() */
	DEVM_IOREMAP_WC,	/* write-combined: ioremap_wc() */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) void devm_ioremap_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) iounmap(*(void __iomem **)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
/* Devres match callback: true when @res records exactly @match_data. */
static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	void **slot = res;

	return *slot == match_data;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) resource_size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) enum devm_ioremap_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) void __iomem **ptr, *addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) case DEVM_IOREMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) addr = ioremap(offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) case DEVM_IOREMAP_UC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) addr = ioremap_uc(offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) case DEVM_IOREMAP_WC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) addr = ioremap_wc(offset, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) *ptr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	/*
	 * Drop the devres entry WITHOUT running its release callback
	 * (we unmap explicitly below); the WARN fires if @addr was not
	 * recorded as a devm_ioremap*() mapping of this device.
	 */
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
/*
 * Common worker for devm_ioremap_resource*(): validate @res, request the
 * memory region exclusively, then map it with the caching type @type.
 * Every acquired resource is devres-managed and undone on driver detach.
 * Returns the mapped address or an ERR_PTR()-encoded error.
 */
static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	/* Only memory resources can be ioremapped. */
	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	/*
	 * Build a "<device> <resource>" label for the region request
	 * (shows up in /proc/iomem); devm-allocated so it lives at least
	 * as long as the region request below.
	 */
	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name)
		return IOMEM_ERR_PTR(-ENOMEM);

	/* Claim the region exclusively; fails if someone already owns it. */
	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		/* Undo the region request; no mapping was created. */
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * The region is requested exclusively, so this returns -EBUSY if another
 * driver has already claimed the same memory.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
/*
 * NOTE(review): unlike devm_ioremap_resource(), this has no EXPORT_SYMBOL,
 * so it is usable by built-in code only - confirm that is intentional.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * devm_of_iomap - Requests a resource and maps the memory mapped IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * for a given device_node managed by a given device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * Checks that a resource is a valid memory region, requests the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * region and ioremaps it. All operations are managed and will be undone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * on driver detach of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * This is to be used when a device requests/maps resources described
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * by other device tree nodes (children or otherwise).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * @dev: The device "managing" the resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * @node: The device-tree node where the resource resides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * @index: index of the MMIO range in the "reg" property
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * @size: Returns the size of the resource (pass NULL if not needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * Usage example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * base = devm_of_iomap(&pdev->dev, node, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * Please Note: This is not a one-to-one replacement for of_iomap() because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) * of_iomap() function does not track whether the region is already mapped. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * two drivers try to map the same memory, the of_iomap() function will succeed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * but the devm_of_iomap() function will return -EBUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	/* Translate the "reg" entry at @index into a CPU-addressable resource. */
	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	/* Report the resource size before mapping, if the caller asked for it. */
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) #ifdef CONFIG_HAS_IOPORT_MAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * Generic iomap devres
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) static void devm_ioport_map_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) ioport_unmap(*(void __iomem **)res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
/* Devres match callback: true when @res records exactly @match_data. */
static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	void **slot = res;

	return *slot == match_data;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * devm_ioport_map - Managed ioport_map()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * @dev: Generic device to map ioport for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * @port: Port to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * @nr: Number of ports to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * Managed ioport_map(). Map is automatically unmapped on driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * Return: a pointer to the remapped memory or NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) unsigned int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) void __iomem **ptr, *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) addr = ioport_map(port, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) *ptr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) devres_add(dev, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) devres_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) EXPORT_SYMBOL(devm_ioport_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	/*
	 * Drop the devres entry without running its release callback
	 * (already unmapped above); WARN if @addr was not recorded as a
	 * devm_ioport_map() mapping of this device.
	 */
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) #endif /* CONFIG_HAS_IOPORT_MAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * PCI iomap devres
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) #define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
/* Per-device table of BAR mappings created by pcim_iomap*(). */
struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];	/* indexed by BAR; NULL = unmapped */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) static void pcim_iomap_release(struct device *gendev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct pci_dev *dev = to_pci_dev(gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) struct pcim_iomap_devres *this = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) for (i = 0; i < PCIM_IOMAP_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (this->table[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) pci_iounmap(dev, this->table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	/* Fast path: the table already exists. */
	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	/*
	 * devres_get() either registers new_dr or, if another thread
	 * registered a table meanwhile, frees new_dr and returns the
	 * existing entry - so dr is always valid here.
	 */
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
/**
 * pcim_iomap - Managed pcim_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 *
 * Return: the mapped address, or NULL if the table could not be
 * allocated, the BAR is already mapped, or pci_iomap() failed.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	/* The table only has PCIM_IOMAP_MAX slots. */
	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
		return NULL;

	/* Record the mapping so pcim_iomap_release() will undo it. */
	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	/* Table must exist: pcim_iomap() created it when @addr was mapped. */
	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	/* Clear the slot so pcim_iomap_release() won't unmap @addr again. */
	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	/* @addr was not recorded - not a pcim_iomap() mapping of @pdev. */
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 *
 * Return: 0 on success, a negative error code on failure.  On failure
 * every region requested and mapped by this call is undone.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	/*
	 * NOTE(review): the loop scans DEVICE_COUNT_RESOURCE bits, but
	 * pcim_iomap() BUG_ONs for bar >= PCIM_IOMAP_MAX, so @mask bits
	 * at or above PCIM_IOMAP_MAX would crash - confirm callers never
	 * set them.
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		/* A zero-length BAR is absent/invalid - refuse to map it. */
		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

	/* Current BAR was requested but not mapped: release it first. */
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	/* Unwind every BAR fully handled in earlier iterations. */
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * @pdev: PCI device to map IO resources for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * @mask: Mask of BARs to iomap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * @name: Name used when requesting regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * Request all PCI BARs and iomap regions specified by @mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) int request_mask = ((1 << 6) - 1) & ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) rc = pci_request_selected_regions(pdev, request_mask, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) rc = pcim_iomap_regions(pdev, mask, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) pci_release_selected_regions(pdev, request_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) EXPORT_SYMBOL(pcim_iomap_regions_request_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	/*
	 * NOTE(review): assumes every bit in @mask was previously mapped;
	 * an unmapped slot passes NULL to pcim_iounmap() - confirm that
	 * is harmless for all callers.
	 */
	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) #endif /* CONFIG_PCI */