Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * TI AM33XX SRAM EMIF Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2016-2017 Texas Instruments Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *	Dave Gerlach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/genalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/sram.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/ti-emif-sram.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "emif.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
/*
 * Byte offset of a symbol inside the relocatable PM code image, measured
 * from the image start symbol ti_emif_sram. Used to translate link-time
 * symbol addresses into addresses within the sram copy of the code.
 */
#define TI_EMIF_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
					 (unsigned long)&ti_emif_sram)

/* Self-refresh entry delay value (8192 cycles) written to the EMIF
 * power-management registers; see the errata note above
 * ti_emif_configure_sr_delay() for why this specific value is required. */
#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES		0x00a0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
/*
 * Per-device state for the EMIF PM driver: locations of the code and data
 * images pushed into ocmcram, the pools they were allocated from, and the
 * tables handed to/derived from the sram-resident PM code.
 */
struct ti_emif_data {
	phys_addr_t ti_emif_sram_phys;		/* phys addr of sram code copy */
	phys_addr_t ti_emif_sram_data_phys;	/* phys addr of sram data region */
	unsigned long ti_emif_sram_virt;	/* virt addr of sram code copy */
	unsigned long ti_emif_sram_data_virt;	/* virt addr of sram data region */
	struct gen_pool *sram_pool_code;	/* executable ocmcram pool */
	struct gen_pool	*sram_pool_data;	/* data ocmcram pool */
	struct ti_emif_pm_data pm_data;		/* table copied into the code image */
	struct ti_emif_pm_functions pm_functions; /* entry points inside sram code */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) static struct ti_emif_data *emif_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) static u32 sram_suspend_address(struct ti_emif_data *emif_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 				unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	return (emif_data->ti_emif_sram_virt +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 		TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) static phys_addr_t sram_resume_address(struct ti_emif_data *emif_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 				       unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	return ((unsigned long)emif_data->ti_emif_sram_phys +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 		TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) static void ti_emif_free_sram(struct ti_emif_data *emif_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 		      ti_emif_sram_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	gen_pool_free(emif_data->sram_pool_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 		      emif_data->ti_emif_sram_data_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 		      sizeof(struct emif_regs_amx3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
/*
 * Allocate sram space for the PM code and data from the two "sram" phandles
 * in the device node (index 0: executable pool, index 1: data pool), then
 * fill in the pm_functions table with the addresses the suspend/resume
 * paths will jump to. Returns 0 on success or a negative errno; on failure
 * any code allocation already made is released.
 */
static int ti_emif_alloc_sram(struct device *dev,
			      struct ti_emif_data *emif_data)
{
	struct device_node *np = dev->of_node;
	int ret;

	/* Pool index 0 must be the executable ocmcram region */
	emif_data->sram_pool_code = of_gen_pool_get(np, "sram", 0);
	if (!emif_data->sram_pool_code) {
		dev_err(dev, "Unable to get sram pool for ocmcram code\n");
		return -ENODEV;
	}

	emif_data->ti_emif_sram_virt =
			gen_pool_alloc(emif_data->sram_pool_code,
				       ti_emif_sram_sz);
	if (!emif_data->ti_emif_sram_virt) {
		dev_err(dev, "Unable to allocate code memory from ocmcram\n");
		return -ENOMEM;
	}

	/* Save physical address to calculate resume offset during pm init */
	emif_data->ti_emif_sram_phys =
			gen_pool_virt_to_phys(emif_data->sram_pool_code,
					      emif_data->ti_emif_sram_virt);

	/* Get sram pool for data section and allocate space */
	emif_data->sram_pool_data = of_gen_pool_get(np, "sram", 1);
	if (!emif_data->sram_pool_data) {
		dev_err(dev, "Unable to get sram pool for ocmcram data\n");
		ret = -ENODEV;
		goto err_free_sram_code;
	}

	emif_data->ti_emif_sram_data_virt =
				gen_pool_alloc(emif_data->sram_pool_data,
					       sizeof(struct emif_regs_amx3));
	if (!emif_data->ti_emif_sram_data_virt) {
		dev_err(dev, "Unable to allocate data memory from ocmcram\n");
		ret = -ENOMEM;
		goto err_free_sram_code;
	}

	/* Save physical address to calculate resume offset during pm init */
	emif_data->ti_emif_sram_data_phys =
		gen_pool_virt_to_phys(emif_data->sram_pool_data,
				      emif_data->ti_emif_sram_data_virt);
	/*
	 * These functions are called during suspend path while MMU is
	 * still on so add virtual base to offset for absolute address
	 */
	emif_data->pm_functions.save_context =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_save_context);
	emif_data->pm_functions.enter_sr =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_enter_sr);
	emif_data->pm_functions.abort_sr =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_abort_sr);

	/*
	 * These are called during resume path when MMU is not enabled
	 * so physical address is used instead
	 */
	emif_data->pm_functions.restore_context =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_restore_context);
	emif_data->pm_functions.exit_sr =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_exit_sr);
	emif_data->pm_functions.run_hw_leveling =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_run_hw_leveling);

	/* Let the sram PM code find its register save area in both views */
	emif_data->pm_data.regs_virt =
		(struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
	emif_data->pm_data.regs_phys = emif_data->ti_emif_sram_data_phys;

	return 0;

err_free_sram_code:
	gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
		      ti_emif_sram_sz);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) static int ti_emif_push_sram(struct device *dev, struct ti_emif_data *emif_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	void *copy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	u32 data_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	copy_addr = sram_exec_copy(emif_data->sram_pool_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 				   (void *)emif_data->ti_emif_sram_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 				   &ti_emif_sram, ti_emif_sram_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	if (!copy_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		dev_err(dev, "Cannot copy emif code to sram\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	data_addr = sram_suspend_address(emif_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 					 (unsigned long)&ti_emif_pm_sram_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	copy_addr = sram_exec_copy(emif_data->sram_pool_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 				   (void *)data_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 				   &emif_data->pm_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 				   sizeof(emif_data->pm_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	if (!copy_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 		dev_err(dev, "Cannot copy emif data to code sram\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)  * Due to Usage Note 3.1.2 "DDR3: JEDEC Compliance for Maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)  * Self-Refresh Command Limit" found in AM335x Silicon Errata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)  * (Document SPRZ360F Revised November 2013) we must configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)  * the self refresh delay timer to 0xA (8192 cycles) to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)  * generating too many refresh command from the EMIF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static void ti_emif_configure_sr_delay(struct ti_emif_data *emif_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	       (emif_data->pm_data.ti_emif_base_addr_virt +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 		EMIF_POWER_MANAGEMENT_CONTROL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	       (emif_data->pm_data.ti_emif_base_addr_virt +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		EMIF_POWER_MANAGEMENT_CTRL_SHDW));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)  * ti_emif_copy_pm_function_table - copy mapping of pm funcs in sram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  * @sram_pool: pointer to struct gen_pool where dst resides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)  * @dst: void * to address that table should be copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * Returns 0 if success other error code if table is not available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	void *copy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	if (!emif_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	copy_addr = sram_exec_copy(sram_pool, dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 				   &emif_instance->pm_functions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 				   sizeof(emif_instance->pm_functions));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	if (!copy_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) EXPORT_SYMBOL_GPL(ti_emif_copy_pm_function_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)  * ti_emif_get_mem_type - return type for memory type in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  * Returns memory type value read from EMIF or error code if fails
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) int ti_emif_get_mem_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	unsigned long temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	if (!emif_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	temp = readl(emif_instance->pm_data.ti_emif_base_addr_virt +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		     EMIF_SDRAM_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	temp = (temp & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	return temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) EXPORT_SYMBOL_GPL(ti_emif_get_mem_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
/* .data carries the EMIF register-layout variant consumed via match->data
 * in ti_emif_probe() and stored in pm_data.ti_emif_sram_config. */
static const struct of_device_id ti_emif_of_match[] = {
	{ .compatible = "ti,emif-am3352", .data =
					(void *)EMIF_SRAM_AM33_REG_LAYOUT, },
	{ .compatible = "ti,emif-am4372", .data =
					(void *)EMIF_SRAM_AM43_REG_LAYOUT, },
	{},
};
MODULE_DEVICE_TABLE(of, ti_emif_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) #ifdef CONFIG_PM_SLEEP
/*
 * System-resume hook: re-push the PM code/data image into sram if a deep
 * sleep state caused the ocmcram contents to be lost.
 */
static int ti_emif_resume(struct device *dev)
{
	/* First word currently sitting at the sram code destination */
	unsigned long tmp =
			__raw_readl((void __iomem *)emif_instance->ti_emif_sram_virt);

	/*
	 * Check to see if what we are copying is already present in the
	 * first byte at the destination, only copy if it is not which
	 * indicates we have lost context and sram no longer contains
	 * the PM code
	 */
	/* NOTE(review): compares against the value of the ti_emif_sram
	 * symbol itself (the first word of the source image) — relies on
	 * how ti_emif_sram is declared in ti-emif-sram.h; confirm there. */
	if (tmp != ti_emif_sram)
		ti_emif_push_sram(dev, emif_instance);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
static int ti_emif_suspend(struct device *dev)
{
	/*
	 * Nothing to save on the way down: the source image stays in DDR,
	 * and ti_emif_resume() re-pushes it into sram if context was lost.
	 */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 
/*
 * Probe: map the EMIF registers, apply the errata self-refresh delay,
 * allocate sram for the PM code/data, push the image, and publish the
 * instance for ti_emif_copy_pm_function_table()/ti_emif_get_mem_type().
 */
static int ti_emif_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *res;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct ti_emif_data *emif_data;

	emif_data = devm_kzalloc(dev, sizeof(*emif_data), GFP_KERNEL);
	if (!emif_data)
		return -ENOMEM;

	match = of_match_device(ti_emif_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/* Register-layout variant selected by the compatible string */
	emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data;

	/* devm_ioremap_resource() handles a NULL res by returning an ERR_PTR */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev,
									  res);
	if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) {
		ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt);
		return ret;
	}

	/* Physical base is needed by the MMU-off resume path */
	emif_data->pm_data.ti_emif_base_addr_phys = res->start;

	ti_emif_configure_sr_delay(emif_data);

	ret = ti_emif_alloc_sram(dev, emif_data);
	if (ret)
		return ret;

	ret = ti_emif_push_sram(dev, emif_data);
	if (ret)
		goto fail_free_sram;

	/* Publish only after the sram image is fully in place */
	emif_instance = emif_data;

	return 0;

fail_free_sram:
	ti_emif_free_sram(emif_data);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) static int ti_emif_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	struct ti_emif_data *emif_data = emif_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	emif_instance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	ti_emif_free_sram(emif_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
/* Sleep ops: suspend is a no-op, resume re-pushes the sram image if lost */
static const struct dev_pm_ops ti_emif_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ti_emif_suspend, ti_emif_resume)
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
/* Platform driver glue; matched via device tree (ti_emif_of_match). */
static struct platform_driver ti_emif_driver = {
	.probe = ti_emif_probe,
	.remove = ti_emif_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = of_match_ptr(ti_emif_of_match),
		.pm = &ti_emif_pm_ops,
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) module_platform_driver(ti_emif_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) MODULE_DESCRIPTION("Texas Instruments SRAM EMIF driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) MODULE_LICENSE("GPL v2");