// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 **************************************************************************/

#include <linux/highmem.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock,
 * and no other functions that may be using the locks for other purposes may
 * be called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */
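
/*
 * A minimal sketch of the pattern described above (illustrative only, hence
 * compiled out): every kmap_atomic()/kunmap_atomic() use of the per-CPU
 * slots sits entirely inside the driver spinlock, so nothing else can touch
 * the slot while we hold it.
 */
#if 0
static void psb_mmu_slot_pattern_example(struct psb_mmu_driver *driver,
					 struct page *page)
{
	uint32_t *v;

	spin_lock(&driver->lock);
	v = kmap_atomic(page);	/* per-CPU slot; we are the only user */
	v[0] = 0;		/* ... modify the page contents ... */
	kunmap_atomic(v);
	spin_unlock(&driver->lock);
}
#endif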

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault
 * in needed pages. For that, we need an atomic insert_pages function that
 * may fail. If it fails, the caller needs to insert the page using a
 * workqueue function, but on average it should be fast.
 * A rough sketch of such a function follows below.
 */
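
/*
 * A rough sketch of the atomic insert mentioned in the TODO (hypothetical
 * and compiled out): never sleep, fail with an error instead, so the caller
 * can fall back to a workqueue on -EBUSY or -ENOMEM.
 */
#if 0
static int psb_mmu_insert_pfn_atomic(struct psb_mmu_pd *pd, uint32_t pfn,
				     unsigned long addr, int type)
{
	struct psb_mmu_pt *pt;
	int ret = 0;

	if (!spin_trylock(&pd->driver->lock))
		return -EBUSY;		/* caller retries from a workqueue */

	pt = pd->tables[psb_mmu_pd_index(addr)];
	if (!pt) {
		/* Cannot sleep to allocate a new page table here. */
		ret = -ENOMEM;
		goto out;
	}
	pt->v = kmap_atomic(pt->p);
	pt->v[psb_mmu_pt_index(addr)] = psb_mmu_mask_pte(pfn, type);
	pt->count++;
	kunmap_atomic(pt->v);
out:
	spin_unlock(&pd->driver->lock);
	return ret;
}
#endif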

static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}
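
/*
 * Worked example, assuming PSB_PTE_SHIFT == 12 and PSB_PDE_SHIFT == 22
 * (4 KiB pages and 1024-entry tables, consistent with the 0x3FF mask above):
 *
 *	offset 0x12345678:
 *	pd index = 0x12345678 >> 22           = 0x048
 *	pt index = (0x12345678 >> 12) & 0x3FF = 0x345
 *	byte within page                      = 0x678
 */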

#if defined(CONFIG_X86)
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
}

#endif

static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure the invalidate has reached the device before
		   the bit is cleared again */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif

void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/*
	 * Make sure the data cache is turned off and the MMU is flushed
	 * before restoring the bus interface (BIF) control register.
	 */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);
	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);
	up_write(&driver->sem);
}

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}
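
/*
 * E.g. with 4 MiB PDE coverage (PSB_PDE_MASK == 0x3FFFFF, an assumption
 * consistent with psb_mmu_pd_index() above): addr 0x12345678 rounds up to
 * the next directory boundary 0x12400000, clamped to 'end' on the last
 * iteration so callers can loop with "addr = next" until next == end.
 */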

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
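
/*
 * For example (bit values symbolic, per psb_reg.h): pfn 0x1234 with type
 * PSB_MMU_CACHED_MEMORY yields the PTE
 * (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED, i.e. 0x01234000
 * (assuming 4 KiB pages) plus the valid and cached bits.
 */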

struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

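	/*
	 * When pagefaults are not trapped, unmapped addresses are redirected
	 * through the dummy page table to the dummy page, so stray GPU
	 * accesses hit harmless memory instead of faulting.
	 */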
	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/*
	 * Should take the spinlock here, but we don't need to do that
	 * since we have the semaphore in write mode.
	 */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;

		/* Flush the new PDE while the mapping is still live. */
		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic((void *) v);
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	down_read(&driver->sem);
	pd = driver->default_pd;
	up_read(&driver->sem);

	return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	struct drm_psb_private *dev_priv = dev->dev_private;

	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return NULL;

	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);

	driver->has_clflush = 0;
	/* Safe defaults so these are never read uninitialized; they are
	   only meaningful when has_clflush is set below. */
	driver->clflush_add = PAGE_SIZE;
	driver->clflush_mask = 0;

#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64 but
		 * not for i386. We have to do it here.
		 */
		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
			PAGE_SIZE * clflush_size / sizeof(uint32_t);
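		/*
		 * Example, assuming 64-byte cache lines (CPUID misc byte ==
		 * 8 quadwords): clflush_size = 64, so clflush_add =
		 * 4096 * 64 / 4 = 0x10000 -- the span of GPU virtual address
		 * space covered by one CPU cache line of 32-bit PTEs.
		 */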
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

#if defined(CONFIG_X86)
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush)
		return;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
#else
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
			       uint32_t num_pages, uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif

void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}

int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);
	ret = 0;

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	/* Propagate the error instead of unconditionally returning 0. */
	return ret;
}

int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride, uint32_t hw_tile_stride,
			 int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = -ENOMEM;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}

	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);

	return ret;
}
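
/*
 * Typical (hypothetical) usage sketch, compiled out: map 'npages' pages
 * linearly at 'gpu_addr' with no tiling (hw_tile_stride == 0 means a single
 * row), then tear the mapping down again.
 */
#if 0
static int psb_mmu_map_example(struct psb_mmu_pd *pd, struct page **pages,
			       unsigned long gpu_addr, uint32_t npages)
{
	int ret = psb_mmu_insert_pages(pd, pages, gpu_addr, npages, 0, 0,
				       PSB_MMU_CACHED_MEMORY);
	if (ret)
		return ret;
	/* ... let the GPU use the mapping ... */
	psb_mmu_remove_pages(pd, gpu_addr, npages, 0, 0);
	return 0;
}
#endif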

int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}