Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

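The file below is the NVIDIA nForce/nForce2 AGP GART driver as carried in this tree (presumably drivers/char/agp/nvidia-agp.c; the path is an assumption based on the file's contents).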
/*
 * Nvidia AGPGART routines.
 * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
 * to work in 2.5 by Dave Jones.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include "agp.h"

/* NVIDIA registers */
#define NVIDIA_0_APSIZE		0x80
#define NVIDIA_1_WBC		0xf0
#define NVIDIA_2_GARTCTRL	0xd0
#define NVIDIA_2_APBASE		0xd8
#define NVIDIA_2_APLIMIT	0xdc
#define NVIDIA_2_ATTBASE(i)	(0xe0 + (i) * 4)
#define NVIDIA_3_APBASE		0x50
#define NVIDIA_3_APLIMIT	0x54


static struct _nvidia_private {
	struct pci_dev *dev_1;
	struct pci_dev *dev_2;
	struct pci_dev *dev_3;
	volatile u32 __iomem *aperture;
	int num_active_entries;
	off_t pg_offset;
	u32 wbc_mask;
} nvidia_private;


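/*
 * Read the 4-bit aperture-size field from the bridge's APSIZE register
 * and match it against the driver's size table, recording the current
 * size for the AGP core.  Returns the size in MB, or 0 if unrecognized.
 */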
static int nvidia_fetch_size(void)
{
	int i;
	u8 size_value;
	struct aper_size_info_8 *values;

	pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
	size_value &= 0x0f;
	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (size_value == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

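/*
 * AMD K7 model-specific registers used below.  SYSCFG and the I/O Range
 * Register (IORR) base/mask pairs let the CPU override the default memory
 * type for a physical-address window such as the AGP aperture.
 */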
#define SYSCFG          0xC0010010
#define IORR_BASE0      0xC0010016
#define IORR_MASK0      0xC0010017
#define AMD_K7_NUM_IORR 2

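/*
 * Point an IORR at the aperture: reuse the IORR that already covers this
 * base if there is one, otherwise claim a free one.  Fails with -EINVAL
 * when both IORRs are in use and neither matches.
 */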
static int nvidia_init_iorr(u32 base, u32 size)
{
	u32 base_hi, base_lo;
	u32 mask_hi, mask_lo;
	u32 sys_hi, sys_lo;
	u32 iorr_addr, free_iorr_addr;

	/* Find the iorr that is already used for the base */
	/* If not found, determine the uppermost available iorr */
	free_iorr_addr = AMD_K7_NUM_IORR;
	for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
		rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
		rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);

		if ((base_lo & 0xfffff000) == (base & 0xfffff000))
			break;

		if ((mask_lo & 0x00000800) == 0)
			free_iorr_addr = iorr_addr;
	}

	if (iorr_addr >= AMD_K7_NUM_IORR) {
		iorr_addr = free_iorr_addr;
		if (iorr_addr >= AMD_K7_NUM_IORR)
			return -EINVAL;
	}
	base_hi = 0x0;
	base_lo = (base & ~0xfff) | 0x18;
	mask_hi = 0xf;
	mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
	wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
	wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);

	rdmsr(SYSCFG, sys_lo, sys_hi);
	sys_lo |= 0x00100000;
	wrmsr(SYSCFG, sys_lo, sys_hi);

	return 0;
}

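/*
 * Program the bridge for the current aperture size: write APBASE/APLIMIT
 * into the companion devices, cover the aperture with an IORR, point the
 * eight ATTBASE registers at the GATT directory pages, enable the GTLB
 * and GART, and ioremap the first 33 aperture pages for TLB flushing.
 */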
static int nvidia_configure(void)
{
	int i, rc, num_dirs;
	u32 apbase, aplimit;
	phys_addr_t apbase_phys;
	struct aper_size_info_8 *current_size;
	u32 temp;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
		current_size->size_value);

	/* address to map to */
	apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
	agp_bridge->gart_bus_addr = apbase;
	aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
	rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024);
	if (rc)
		return rc;

	/* directory size is 64k */
	num_dirs = current_size->size / 64;
	nvidia_private.num_active_entries = current_size->num_entries;
	nvidia_private.pg_offset = 0;
	if (num_dirs == 0) {
		num_dirs = 1;
		nvidia_private.num_active_entries /= (64 / current_size->size);
		nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
			~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
	}

	/* attbase */
	for (i = 0; i < 8; i++) {
		pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
			(agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
	}

	/* gtlb control */
	pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);

	/* gart control */
	pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
	pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);

	/* map aperture */
	apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR);
	nvidia_private.aperture =
		(volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE);

	if (!nvidia_private.aperture)
		return -ENOMEM;

	return 0;
}

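/* Undo nvidia_configure(): disable the GART and GTLB, unmap the aperture,
 * and restore the aperture size (and matching IORR) that was in effect
 * before the driver took over.
 */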
static void nvidia_cleanup(void)
{
	struct aper_size_info_8 *previous_size;
	u32 temp;

	/* gart control */
	pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
	pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));

	/* gtlb control */
	pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));

	/* unmap aperture */
	iounmap((void __iomem *) nvidia_private.aperture);

	/* restore previous aperture size */
	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
		previous_size->size_value);

	/* restore iorr for previous aperture size */
	nvidia_init_iorr(agp_bridge->gart_bus_addr,
		previous_size->size * 1024 * 1024);
}


/*
 * Note we can't use the generic routines, even though they are 99% the same.
 * Aperture sizes <64M still require a full 64k GART directory, but use
 * only the portion of the TLB entries that corresponds to the aperture's
 * alignment inside the surrounding 64M block.
 */
extern int agp_memory_reserved;

static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j;
	int mask_type;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	if ((pg_start + mem->page_count) >
		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
		return -EINVAL;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
			return -EBUSY;
	}

	if (!mem->is_flushed) {
		global_cache_flush();
		mem->is_flushed = true;
	}
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			       page_to_phys(mem->pages[i]), mask_type),
			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
	}

	/* PCI Posting. */
	readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}


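/* Unbind a range by pointing each GATT entry back at the scratch page,
 * then flush the GTLB so stale translations are dropped.
 */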
static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i;
	int mask_type;

	mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
	if (mask_type != 0 || type != mem->type)
		return -EINVAL;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}


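/*
 * Flush the chipset's GART TLB.  First trigger a write-buffer flush via
 * the WBC register (on chipsets that have one) and poll until the
 * hardware clears the mask bits, warning if that takes over three
 * seconds.  Then read one word from each of the 33 mapped aperture
 * pages, twice, to force the GTLB to cycle out cached translations.
 */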
static void nvidia_tlbflush(struct agp_memory *mem)
{
	unsigned long end;
	u32 wbc_reg, temp;
	int i;

	/* flush chipset */
	if (nvidia_private.wbc_mask) {
		pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
		wbc_reg |= nvidia_private.wbc_mask;
		pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);

		end = jiffies + 3*HZ;
		do {
			pci_read_config_dword(nvidia_private.dev_1,
					NVIDIA_1_WBC, &wbc_reg);
			if (time_before_eq(end, jiffies)) {
				printk(KERN_ERR PFX
				    "TLB flush took more than 3 seconds.\n");
			}
		} while (wbc_reg & nvidia_private.wbc_mask);
	}

	/* flush TLB entries */
	for (i = 0; i < 32 + 1; i++)
		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
	for (i = 0; i < 32 + 1; i++)
		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
}


static const struct aper_size_info_8 nvidia_generic_sizes[5] =
{
	{512, 131072, 7, 0},
	{256, 65536, 6, 8},
	{128, 32768, 5, 12},
	{64, 16384, 4, 14},
	/* The 32M mode still requires a 64k gatt */
	{32, 16384, 4, 15}
};


static const struct gatt_mask nvidia_generic_masks[] =
{
	{ .mask = 1, .type = 0}
};


static const struct agp_bridge_driver nvidia_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= nvidia_generic_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 5,
	.needs_scratch_page	= true,
	.configure		= nvidia_configure,
	.fetch_size		= nvidia_fetch_size,
	.cleanup		= nvidia_cleanup,
	.tlb_flush		= nvidia_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= nvidia_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= nvidia_insert_memory,
	.remove_memory		= nvidia_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};

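/*
 * Bind to the nForce/nForce2 host bridge.  The GART registers live on
 * companion PCI functions (device 0 functions 1 and 2, plus device 30
 * function 0), so look those up first, then pick the chipset-specific
 * write-buffer-flush mask and register the bridge with the AGP core.
 */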
static int agp_nvidia_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;

	nvidia_private.dev_1 =
		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    (unsigned int)pdev->bus->number,
					    PCI_DEVFN(0, 1));
	nvidia_private.dev_2 =
		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    (unsigned int)pdev->bus->number,
					    PCI_DEVFN(0, 2));
	nvidia_private.dev_3 =
		pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					    (unsigned int)pdev->bus->number,
					    PCI_DEVFN(30, 0));

	if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
		printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
			"chipset, but could not find the secondary devices.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	switch (pdev->device) {
	case PCI_DEVICE_ID_NVIDIA_NFORCE:
		printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
		nvidia_private.wbc_mask = 0x00010000;
		break;
	case PCI_DEVICE_ID_NVIDIA_NFORCE2:
		printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
		nvidia_private.wbc_mask = 0x80000000;
		break;
	default:
		printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
			    pdev->device);
		return -ENODEV;
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &nvidia_driver;
	bridge->dev_private_data = &nvidia_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}


static void agp_nvidia_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

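/* Legacy PCI power-management hooks: suspend saves config space and drops
 * the bridge to D3hot; resume restores config space and calls
 * nvidia_configure() to reprogram the aperture and GART.
 */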
#ifdef CONFIG_PM
static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int agp_nvidia_resume(struct pci_dev *pdev)
{
	/* set power state 0 and restore PCI space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* reconfigure AGP hardware again */
	nvidia_configure();

	return 0;
}
#endif


static const struct pci_device_id agp_nvidia_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE2,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);

static struct pci_driver agp_nvidia_pci_driver = {
	.name		= "agpgart-nvidia",
	.id_table	= agp_nvidia_pci_table,
	.probe		= agp_nvidia_probe,
	.remove		= agp_nvidia_remove,
#ifdef CONFIG_PM
	.suspend	= agp_nvidia_suspend,
	.resume		= agp_nvidia_resume,
#endif
};

static int __init agp_nvidia_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_nvidia_pci_driver);
}

static void __exit agp_nvidia_cleanup(void)
{
	pci_unregister_driver(&agp_nvidia_pci_driver);
	pci_dev_put(nvidia_private.dev_1);
	pci_dev_put(nvidia_private.dev_2);
	pci_dev_put(nvidia_private.dev_3);
}

module_init(agp_nvidia_init);
module_exit(agp_nvidia_cleanup);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("NVIDIA Corporation");