Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>

#include "habanalabs.h"

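/* Return true if the virtual address lies inside the device's DRAM VA range. */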
static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for the pgts.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	if (hdev->mmu_enable)
		return hdev->mmu_func.init(hdev);

	return 0;
}

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (hdev->mmu_enable)
		hdev->mmu_func.fini(hdev);
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash table
 * to hold all the page table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		return hdev->mmu_func.ctx_init(ctx);

	return 0;
}

/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		hdev->mmu_func.ctx_fini(ctx);
}
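
/*
 * Typical call order, pieced together from the comments in this file (an
 * illustrative sketch, not code taken from the driver):
 *
 *	hl_mmu_init(hdev);	// once per device
 *	hl_mmu_ctx_init(ctx);	// once per context
 *	...			// hl_mmu_map()/hl_mmu_unmap() calls
 *	hl_mmu_ctx_fini(ctx);	// frees any pgts left for the context
 *	hl_mmu_fini(hdev);	// only after all contexts are freed
 */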

/*
 * hl_mmu_unmap - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and free pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented at a higher level in order to protect the entire mapping of the
 * memory area.
 *
 * For optimization reasons, the PCI flush may be requested once after
 * unmapping a large area.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr;
	u32 real_page_size, npages;
	int i, rc = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and unmap them separately.
	 */
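	/*
	 * e.g., with a 4 KB mmu_prop->page_size, an 8 KB request is broken
	 * into two 4 KB sub-pages (npages = 2).
	 */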
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't unmap\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	if (flush_pte)
		hdev->mmu_func.flush(ctx);

	return rc;
}
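
/*
 * Illustrative caller sketch (not from this file; the lock name and the
 * surrounding variables are assumptions): a large, page-aligned region is
 * unmapped one page at a time under a higher-level MMU lock, and the PCI
 * flush is requested only on the last call, per the comment above.
 *
 *	u64 addr, end = start + size;
 *	int rc = 0;
 *
 *	mutex_lock(&ctx->mmu_lock);
 *	for (addr = start ; addr < end ; addr += page_size) {
 *		bool last = (addr + page_size >= end);
 *
 *		rc = hl_mmu_unmap(ctx, addr, page_size, last);
 *		if (rc)
 *			break;
 *	}
 *	mutex_unlock(&ctx->mmu_lock);
 */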

/*
 * hl_mmu_map - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * at a higher level in order to protect the entire mapping of the memory area.
 *
 * For optimization reasons, the PCI flush may be requested once after mapping
 * a large area.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	u32 real_page_size, npages;
	int i, rc, mapped_cnt = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't map\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	WARN_ONCE((phys_addr & (real_page_size - 1)),
		"Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
		phys_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr,
				real_page_size, is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		hdev->mmu_func.flush(ctx);

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	hdev->mmu_func.flush(ctx);

	return rc;
}
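
/*
 * Illustrative caller sketch (assumed lock and variable names, not from this
 * file): mapping a region page by page with the flush deferred to the last
 * call, and unmapping the already-mapped pages on failure, mirroring what
 * hl_mmu_map() itself does for its sub-pages.
 *
 *	mutex_lock(&ctx->mmu_lock);
 *	for (off = 0 ; off < size ; off += page_size) {
 *		rc = hl_mmu_map(ctx, va + off, pa + off, page_size,
 *				off + page_size >= size);
 *		if (rc)
 *			break;
 *	}
 *	if (rc)
 *		while (off) {
 *			off -= page_size;
 *			hl_mmu_unmap(ctx, va + off, page_size, off == 0);
 *		}
 *	mutex_unlock(&ctx->mmu_lock);
 */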

/*
 * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		hdev->mmu_func.swap_out(ctx);
}

/*
 * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		hdev->mmu_func.swap_in(ctx);
}

int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return 0;

	switch (hdev->asic_type) {
	case ASIC_GOYA:
	case ASIC_GAUDI:
		hl_mmu_v1_set_funcs(hdev);
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EOPNOTSUPP;
	}

	return 0;
}