/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
	if (key < 0)
		return;

	if (key < MAXKEY)
		clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


static int agp_get_key(void)
{
	int bit;

	bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
	if (bit < MAXKEY) {
		set_bit(bit, agp_bridge->key_list);
		return bit;
	}
	return -1;
}

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
	mem->pages = kvmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(agp_alloc_page_array);
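
/*
 * Illustrative sketch (not part of this file; the sizing is made up):
 * callers size the array from a page count and release it with
 * agp_free_page_array(), which kvfree()s whichever allocation kvmalloc()
 * returned above:
 *
 *	agp_alloc_page_array(num_pages * sizeof(struct page *), mem);
 *	if (mem->pages == NULL)
 *		return -ENOMEM;
 *	...
 *	agp_free_page_array(mem);
 */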
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
	struct agp_memory *new;
	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
		return NULL;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(alloc_size, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = 0;
	return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
	struct agp_memory *new;

	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
	if (new == NULL)
		return NULL;

	new->key = agp_get_key();

	if (new->key < 0) {
		kfree(new);
		return NULL;
	}

	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

	if (new->pages == NULL) {
		agp_free_key(new->key);
		kfree(new);
		return NULL;
	}
	new->num_scratch_pages = scratch_pages;
	new->type = AGP_NORMAL_MEMORY;
	return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	if (curr->is_bound)
		agp_unbind_memory(curr);

	if (curr->type >= AGP_USER_TYPES) {
		agp_generic_free_by_type(curr);
		return;
	}

	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	if (curr->page_count != 0) {
		if (curr->bridge->driver->agp_destroy_pages) {
			curr->bridge->driver->agp_destroy_pages(curr);
		} else {

			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_UNMAP);
			}
			for (i = 0; i < curr->page_count; i++) {
				curr->bridge->driver->agp_destroy_page(
					curr->pages[i],
					AGP_PAGE_DESTROY_FREE);
			}
		}
	}
	agp_free_key(curr->key);
	agp_free_page_array(curr);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))

/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
					size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;
	int cur_memory;

	if (!bridge)
		return NULL;

	cur_memory = atomic_read(&bridge->current_memory_agp);
	if ((cur_memory + page_count > bridge->max_memory_agp) ||
	    (cur_memory + page_count < page_count))
		return NULL;

	if (type >= AGP_USER_TYPES) {
		new = agp_generic_alloc_user(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	if (bridge->driver->agp_alloc_pages) {
		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
			agp_free_memory(new);
			return NULL;
		}
		new->bridge = bridge;
		return new;
	}

	for (i = 0; i < page_count; i++) {
		struct page *page = bridge->driver->agp_alloc_page(bridge);

		if (page == NULL) {
			agp_free_memory(new);
			return NULL;
		}
		new->pages[i] = page;
		new->page_count++;
	}
	new->bridge = bridge;

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
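
/*
 * Illustrative sketch (hypothetical caller, not part of this driver; the
 * page count is made up and pg_start is a caller-chosen aperture offset):
 * a client that owns the backend typically drives the full lifecycle like
 * this:
 *
 *	struct agp_memory *mem;
 *	int ret;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	ret = agp_bind_memory(mem, pg_start);
 *	if (ret != 0) {
 *		agp_free_memory(mem);
 *		return ret;
 *	}
 *	...
 *	agp_unbind_memory(mem);
 *	agp_free_memory(mem);
 */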


/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size < 0)
		current_size = 0;
	return current_size;
}
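
/*
 * Worked example (made-up numbers): with a current aperture size entry of
 * 64 (the aperture size tables are in megabytes) and agp_memory_reserved
 * set to 4 MiB, agp_return_size() reports 60; a reservation larger than the
 * aperture clamps the result to 0.
 */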


int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved >> PAGE_SHIFT;
	if (num_entries < 0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);
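
/*
 * Worked example (made-up numbers, assuming 4 KiB pages): a 64 MB aperture
 * exposes 16384 GATT entries; with agp_memory_reserved = 4 MiB the reserved
 * region covers 1024 entries, so agp_num_entries() returns 15360.
 */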


/**
 * agp_copy_info - copy bridge state information
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
	memset(info, 0, sizeof(struct agp_kern_info));
	if (!bridge) {
		info->chipset = NOT_SUPPORTED;
		return -EIO;
	}

	info->version.major = bridge->version->major;
	info->version.minor = bridge->version->minor;
	info->chipset = SUPPORTED;
	info->device = bridge->dev;
	if (bridge->mode & AGPSTAT_MODE_3_0)
		info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
	else
		info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
	info->aper_base = bridge->gart_bus_addr;
	info->aper_size = agp_return_size();
	info->max_memory = bridge->max_memory_agp;
	info->current_memory = atomic_read(&bridge->current_memory_agp);
	info->cant_use_aperture = bridge->driver->cant_use_aperture;
	info->vm_ops = bridge->vm_ops;
	info->page_mask = ~0UL;
	return 0;
}
EXPORT_SYMBOL(agp_copy_info);
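
/*
 * Illustrative sketch (hypothetical caller): the info structure lives on the
 * caller's side and is filled in wholesale, so no cleanup is needed:
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) != 0)
 *		return -ENODEV;
 *	pr_info("aperture at 0x%lx, %zu MB\n", info.aper_base, info.aper_size);
 */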

/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound) {
		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	if (!curr->is_flushed) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = true;
	}

	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = true;
	curr->pg_start = pg_start;
	spin_lock(&agp_bridge->mapped_lock);
	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
	spin_unlock(&agp_bridge->mapped_lock);

	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (!curr->is_bound) {
		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = false;
	curr->pg_start = 0;
	spin_lock(&curr->bridge->mapped_lock);
	list_del(&curr->mapped_list);
	spin_unlock(&curr->bridge->mapped_lock);
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
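
/*
 * Illustrative note (behaviour taken from agp_free_memory() above): an
 * explicit unbind is not strictly required before freeing, because
 * agp_free_memory() unbinds a still-bound region itself before releasing
 * the pages:
 *
 *	if (agp_bind_memory(mem, pg_start) == 0) {
 *		...
 *		agp_free_memory(mem);
 *	}
 */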


/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP2_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/*
	 * Some dumb bridges are programmed to disobey the AGP2 spec.
	 * This is likely a BIOS misprogramming rather than poweron default, or
	 * it would be a lot more common.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
	 * AGPv2 spec 6.1.9 states:
	 * The RATE field indicates the data transfer rates supported by this
	 * device. A.G.P. devices must report all that apply.
	 * Fix them up as best we can.
	 */
	switch (*bridge_agpstat & 7) {
	case 4:
		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
			"Fixing up support for x2 & x1\n");
		break;
	case 2:
		*bridge_agpstat |= AGPSTAT2_1X;
		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
			"Fixing up support for x1\n");
		break;
	default:
		break;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X);	/* rate=4 */
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}
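
/*
 * Worked example (values assume the AGPSTAT2_* layout from agp.h: 1X=bit 0,
 * 2X=bit 1, 4X=bit 2): a bridge whose status reports only 0x4 ("x4 only")
 * violates spec 6.1.9, so the first switch widens it to 0x7; if bridge,
 * card and requested mode then all agree on x2, the masking above leaves
 * AGPSTAT2_2X as the single surviving rate bit.
 */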

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
	u32 tmp;

	if (*requested_mode & AGP3_RESERVED_MASK) {
		printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
			*requested_mode & AGP3_RESERVED_MASK, *requested_mode);
		*requested_mode &= ~AGP3_RESERVED_MASK;
	}

	/* Check the speed bits make sense. */
	tmp = *requested_mode & 7;
	if (tmp == 0) {
		printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
		*requested_mode |= AGPSTAT3_4X;
	}
	if (tmp >= 3) {
		printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
		*requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
	}

	/* ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
		max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

	/* Calibration cycle.
	 * Don't allow the mode register to override values. */
	*bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
		min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

	/* SBA *must* be supported for AGP v3 */
	*bridge_agpstat |= AGPSTAT_SBA;

	/*
	 * Set speed.
	 * Check for invalid speeds. This can happen when applications
	 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
	 */
	if (*requested_mode & AGPSTAT_MODE_3_0) {
		/*
		 * The caller doesn't know what it is doing. The bridge is in
		 * 3.0 mode and has been passed a 3.0 mode, but with 2.x speed
		 * bits set. AGP2.x 4x -> AGP3.0 4x.
		 */
		if (*requested_mode & AGPSTAT2_4X) {
			printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
				current->comm, *requested_mode);
			*requested_mode &= ~AGPSTAT2_4X;
			*requested_mode |= AGPSTAT3_4X;
		}
	} else {
		/*
		 * The caller doesn't know what they are doing. We are in 3.0 mode,
		 * but have been passed an AGP 2.x mode.
		 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
		 */
		printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
			current->comm, *requested_mode);
		*requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
		*requested_mode |= AGPSTAT3_4X;
	}

	if (*requested_mode & AGPSTAT3_8X) {
		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
			return;
		}
		if (!(*vga_agpstat & AGPSTAT3_8X)) {
			*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
			*bridge_agpstat |= AGPSTAT3_4X;
			printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
			return;
		}
		/* All set, bridge & device can do AGP x8 */
		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		goto done;

	} else if (*requested_mode & AGPSTAT3_4X) {
		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
		*bridge_agpstat |= AGPSTAT3_4X;
		goto done;

	} else {

		/*
		 * If no AGP mode was specified, check whether both the
		 * graphics card and the bridge can do x8, and use it if so.
		 * If not, fall back to x4 mode.
		 */
		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
				"supported by bridge & card (x8).\n");
			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
		} else {
			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
					*bridge_agpstat, origbridge);
				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*bridge_agpstat |= AGPSTAT3_4X;
			}
			if (!(*vga_agpstat & AGPSTAT3_8X)) {
				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
					*vga_agpstat, origvga);
				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
				*vga_agpstat |= AGPSTAT3_4X;
			}
		}
	}

done:
	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}
}
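
/*
 * Worked example (values assume AGPSTAT2_4X = 0x4 and AGPSTAT3_4X = 0x1, as
 * defined in agp.h): an old userspace that passes a legacy 2.x mode such as
 * 0x4 without AGPSTAT_MODE_3_0 set is translated by the "broken AGP2 flags"
 * branch above into AGPSTAT3_4X, so the bridge ends up programmed for AGP3 x4.
 */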


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
	struct pci_dev *device = NULL;
	u32 vga_agpstat;
	u8 cap_ptr;

	for (;;) {
		device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
		if (!device) {
			printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
			return 0;
		}
		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (cap_ptr)
			break;
	}

	/*
	 * Ok, here we have an AGP device. Disable impossible
	 * settings, and adjust the readqueue to the minimum.
	 */
	pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

	/* adjust RQ depth */
	bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
		min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
			min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

	/* disable FW if it's not supported */
	if (!((bridge_agpstat & AGPSTAT_FW) &&
	      (vga_agpstat & AGPSTAT_FW) &&
	      (requested_mode & AGPSTAT_FW)))
		bridge_agpstat &= ~AGPSTAT_FW;

	/* Check to see if we are operating in 3.0 mode */
	if (agp_bridge->mode & AGPSTAT_MODE_3_0)
		agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
	else
		agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

	pci_dev_put(device);
	return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) void agp_device_command(u32 bridge_agpstat, bool agp_v3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct pci_dev *device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) int mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) mode = bridge_agpstat & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (agp_v3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) mode *= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) for_each_pci_dev(device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (!agp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) agp_v3 ? 3 : 2, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) EXPORT_SYMBOL(agp_device_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) void get_agp_version(struct agp_bridge_data *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) u32 ncapid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* Exit early if already set by errata workarounds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (bridge->major_version != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) EXPORT_SYMBOL(get_agp_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
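/*
 * agp_generic_enable() - default ->agp_enable hook.  It collects the modes
 * supported by both the bridge and the graphics card, ORs in
 * AGPSTAT_AGP_ENABLE, and issues the resulting command to every AGP device.
 * A bridge that advertises 3.x but is running in legacy mode gets its
 * calibration-cycle field cleared and falls back to the 2.x path.
 */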
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u32 bridge_agpstat, temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) get_agp_version(agp_bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) agp_bridge->major_version, agp_bridge->minor_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pci_read_config_dword(agp_bridge->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (bridge_agpstat == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* Something bad happened. FIXME: Return error code? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) bridge_agpstat |= AGPSTAT_AGP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Do AGP version specific frobbing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (bridge->major_version >= 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (bridge->mode & AGPSTAT_MODE_3_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* If we have 3.5, we can do the isoch stuff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (bridge->minor_version >= 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) agp_3_5_enable(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) agp_device_command(bridge_agpstat, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) bridge_agpstat &= ~(7<<10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) pci_read_config_dword(bridge->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bridge->capndx+AGPCTRL, &temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) temp |= (1<<9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) pci_write_config_dword(bridge->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) bridge->capndx+AGPCTRL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* AGP v<3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) agp_device_command(bridge_agpstat, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) EXPORT_SYMBOL(agp_generic_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
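/*
 * agp_generic_create_gatt_table() - allocate and initialise a single-level
 * GATT.  For variable aperture sizes the routine walks the driver's
 * aperture_sizes[] table, retrying with the next entry (typically a smaller
 * aperture) whenever alloc_gatt_pages() fails, until an allocation succeeds
 * or the table is exhausted.  The pages backing the table are marked
 * reserved, mapped uncached (set_memory_uc() on x86, ioremap() elsewhere),
 * and every entry is pre-loaded with the scratch page.
 */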
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) char *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) char *table_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* The generic routines can't handle two-level GATTs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (bridge->driver->size_type == LVL2_APER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) i = bridge->aperture_size_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) temp = bridge->current_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) page_order = num_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (bridge->driver->size_type != FIXED_APER_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) switch (bridge->driver->size_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case U8_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) page_order =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) A_SIZE_8(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) num_entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) A_SIZE_8(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) case U16_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) page_order = A_SIZE_16(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) num_entries = A_SIZE_16(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case U32_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) page_order = A_SIZE_32(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) num_entries = A_SIZE_32(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /* This case will never really happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) case FIXED_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) case LVL2_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) page_order = num_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) table = alloc_gatt_pages(page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (table == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) switch (bridge->driver->size_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) case U8_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) bridge->current_size = A_IDX8(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) case U16_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) bridge->current_size = A_IDX16(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) case U32_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) bridge->current_size = A_IDX32(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* These cases will never really happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) case FIXED_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) case LVL2_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) temp = bridge->current_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) bridge->aperture_size_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } while (!table && (i < bridge->driver->num_aperture_sizes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) page_order = ((struct aper_size_info_fixed *) temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) table = alloc_gatt_pages(page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (table == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) SetPageReserved(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bridge->gatt_table_real = (u32 *) table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) agp_gatt_table = (void *)table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) bridge->driver->cache_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (set_memory_uc((unsigned long)table, 1 << page_order))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) bridge->gatt_table = (u32 __iomem *)table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) bridge->gatt_table = ioremap(virt_to_phys(table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) (PAGE_SIZE * (1 << page_order)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) bridge->driver->cache_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (bridge->gatt_table == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ClearPageReserved(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) free_gatt_pages(table, page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* AK: bogus, should encode addresses > 4GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) for (i = 0; i < num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) writel(bridge->scratch_page, bridge->gatt_table+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) readl(bridge->gatt_table+i); /* PCI Posting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) EXPORT_SYMBOL(agp_generic_create_gatt_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
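/*
 * agp_generic_free_gatt_table() - undo agp_generic_create_gatt_table():
 * restore the table's caching attributes (or unmap it on non-x86), clear the
 * reserved flag on its pages and hand them back to the page allocator.
 */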
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) int page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) char *table, *table_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) temp = bridge->current_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) switch (bridge->driver->size_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) case U8_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) page_order = A_SIZE_8(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) case U16_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) page_order = A_SIZE_16(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) case U32_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) page_order = A_SIZE_32(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) case FIXED_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) page_order = A_SIZE_FIX(temp)->page_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) case LVL2_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /* The generic routines can't deal with two-level GATTs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) page_order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* No need to free the mapped memory here: by the time this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * called, all AGP memory has already been deallocated and removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * from the table. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) iounmap(bridge->gatt_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) table = (char *) bridge->gatt_table_real;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ClearPageReserved(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) free_gatt_pages(bridge->gatt_table_real, page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) agp_gatt_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bridge->gatt_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) bridge->gatt_table_real = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) bridge->gatt_bus_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) EXPORT_SYMBOL(agp_generic_free_gatt_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
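/*
 * agp_generic_insert_memory() - bind an agp_memory region at GATT offset
 * pg_start.  After the range and type checks, every target slot must still
 * be unused (PGE_EMPTY); the caller's pages are then run through the
 * driver's mask_memory() hook, written into the table, and the GART TLB is
 * flushed.
 */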
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) int num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) off_t j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct agp_bridge_data *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int mask_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bridge = mem->bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (mem->page_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) temp = bridge->current_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) switch (bridge->driver->size_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case U8_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) num_entries = A_SIZE_8(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) case U16_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) num_entries = A_SIZE_16(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case U32_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) num_entries = A_SIZE_32(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) case FIXED_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) num_entries = A_SIZE_FIX(temp)->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) case LVL2_APER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* The generic routines can't deal with two-level GATTs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) num_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) num_entries -= agp_memory_reserved/PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (num_entries < 0) num_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (type != mem->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (mask_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* The generic routines know nothing of memory types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (((pg_start + mem->page_count) > num_entries) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ((pg_start + mem->page_count) < pg_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) j = pg_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) while (j < (pg_start + mem->page_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!mem->is_flushed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) bridge->driver->cache_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) mem->is_flushed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) writel(bridge->driver->mask_memory(bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) page_to_phys(mem->pages[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) mask_type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) bridge->gatt_table+j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) readl(bridge->gatt_table+j-1); /* PCI Posting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) bridge->driver->tlb_flush(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) EXPORT_SYMBOL(agp_generic_insert_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
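/*
 * agp_generic_remove_memory() - inverse of agp_generic_insert_memory():
 * after the same bounds and type checks, each entry of the bound range is
 * rewritten with the scratch page and the GART TLB is flushed.
 */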
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct agp_bridge_data *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int mask_type, num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) bridge = mem->bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (mem->page_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (type != mem->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) num_entries = agp_num_entries();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (((pg_start + mem->page_count) > num_entries) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) ((pg_start + mem->page_count) < pg_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (mask_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /* The generic routines know nothing of memory types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* AK: bogus, should encode addresses > 4GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) for (i = pg_start; i < (mem->page_count + pg_start); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) writel(bridge->scratch_page, bridge->gatt_table+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) readl(bridge->gatt_table+i-1); /* PCI Posting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) bridge->driver->tlb_flush(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) EXPORT_SYMBOL(agp_generic_remove_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) EXPORT_SYMBOL(agp_generic_alloc_by_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) void agp_generic_free_by_type(struct agp_memory *curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) agp_free_page_array(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) agp_free_key(curr->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) kfree(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) EXPORT_SYMBOL(agp_generic_free_by_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct agp_memory *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) new = agp_create_user_memory(page_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) for (i = 0; i < page_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) new->pages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) new->page_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) new->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) new->num_scratch_pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) EXPORT_SYMBOL(agp_generic_alloc_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Basic Page Allocation Routines -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * These routines handle page allocation and, by default, reserve the allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * memory. They also increment the current_memory_agp value, which is checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * against a maximum value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
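
/*
 * Illustrative sketch (not part of this driver): how the AGP core's
 * allocation front-end roughly uses the two hooks below, assuming a struct
 * agp_memory "mem" whose pages[] array has room for "count" pages.  See
 * agp_allocate_memory() for the real caller.
 *
 *	if (bridge->driver->agp_alloc_pages) {
 *		if (bridge->driver->agp_alloc_pages(bridge, mem, count))
 *			goto fail;	// pages allocated so far stay in mem
 *	} else {
 *		for (i = 0; i < count; i++) {
 *			struct page *page = bridge->driver->agp_alloc_page(bridge);
 *
 *			if (!page)
 *				goto fail;
 *			mem->pages[i] = page;
 *			mem->page_count++;
 *		}
 *	}
 */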
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) struct page * page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int i, ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) for (i = 0; i < num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* agp_free_memory() needs gart address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (page == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) #ifndef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) map_page_into_agp(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) atomic_inc(&agp_bridge->current_memory_agp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) mem->pages[i] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) mem->page_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) set_pages_array_uc(mem->pages, num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) EXPORT_SYMBOL(agp_generic_alloc_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct page * page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (page == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) map_page_into_agp(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) atomic_inc(&agp_bridge->current_memory_agp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) EXPORT_SYMBOL(agp_generic_alloc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) void agp_generic_destroy_pages(struct agp_memory *mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (!mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) set_pages_array_wb(mem->pages, mem->page_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) for (i = 0; i < mem->page_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) page = mem->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #ifndef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) unmap_page_from_agp(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) atomic_dec(&agp_bridge->current_memory_agp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) mem->pages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) EXPORT_SYMBOL(agp_generic_destroy_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) void agp_generic_destroy_page(struct page *page, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (page == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (flags & AGP_PAGE_DESTROY_UNMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unmap_page_from_agp(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (flags & AGP_PAGE_DESTROY_FREE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) atomic_dec(&agp_bridge->current_memory_agp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) EXPORT_SYMBOL(agp_generic_destroy_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* End Basic Page Allocation Routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * agp_enable - initialise the agp point-to-point connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * @mode: agp mode register value to configure with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) void agp_enable(struct agp_bridge_data *bridge, u32 mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (!bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) bridge->driver->agp_enable(bridge, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) EXPORT_SYMBOL(agp_enable);
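
/*
 * Illustrative sketch (not part of this driver): a client such as a DRM
 * driver normally acquires the backend first and then asks for a mode, e.g.:
 *
 *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
 *
 *	if (bridge) {
 *		agp_enable(bridge, AGPSTAT2_4X | AGPSTAT_FW);	// example mode bits
 *		...
 *		agp_backend_release(bridge);
 *	}
 *
 * The mode value is only a request: agp_collect_device_status() above clamps
 * it against what the bridge and the card actually support.
 */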
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /* Once the global variable agp_bridge has been removed from all drivers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * agp_alloc_bridge and agp_generic_find_bridge will need to be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (list_empty(&agp_bridges))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return agp_bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static void ipi_handler(void *null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) flush_agp_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
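/*
 * global_cache_flush() - run flush_agp_cache() on every online CPU via an
 * IPI so that no CPU keeps stale cache lines for pages that are being mapped
 * into or out of the GART.
 */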
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) void global_cache_flush(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) on_each_cpu(ipi_handler, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) EXPORT_SYMBOL(global_cache_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
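/*
 * agp_generic_mask_memory() - turn a page's bus address into a GATT entry by
 * ORing in the driver's first mask value (typically a "valid" bit); the
 * memory type is ignored by the generic routine.
 */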
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dma_addr_t addr, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /* memory type is ignored in the generic routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (bridge->driver->masks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return addr | bridge->driver->masks[0].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) EXPORT_SYMBOL(agp_generic_mask_memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (type >= AGP_USER_TYPES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) EXPORT_SYMBOL(agp_generic_type_to_mask_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * These functions are implemented according to the AGPv3 spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * which covers implementation details that had previously been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * left open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) */
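
/*
 * Illustrative sketch (not part of this driver): chipset drivers typically
 * wire these helpers straight into their agp_bridge_driver, roughly:
 *
 *	static const struct agp_bridge_driver example_agp3_driver = {
 *		.owner		= THIS_MODULE,
 *		.aperture_sizes	= agp3_generic_sizes,
 *		.size_type	= U16_APER_SIZE,
 *		.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES,
 *		.configure	= agp3_generic_configure,
 *		.fetch_size	= agp3_generic_fetch_size,
 *		.cleanup	= agp3_generic_cleanup,
 *		.tlb_flush	= agp3_generic_tlbflush,
 *		...
 *	};
 *
 * "example_agp3_driver" is a placeholder; see the in-tree chipset drivers
 * for complete instances.
 */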
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
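/*
 * agp3_generic_fetch_size() - read the 16-bit AGPAPSIZE field and match it
 * against the driver's aperture_sizes[] table, caching the hit in
 * current_size/previous_size and returning its size in megabytes (0 if the
 * value is not recognised).
 */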
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int agp3_generic_fetch_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) u16 temp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct aper_size_info_16 *values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (temp_size == values[i].size_value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) agp_bridge->previous_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) agp_bridge->current_size = (void *) (values + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) agp_bridge->aperture_size_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return values[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) EXPORT_SYMBOL(agp3_generic_fetch_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) void agp3_generic_tlbflush(struct agp_memory *mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) EXPORT_SYMBOL(agp3_generic_tlbflush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
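/*
 * agp3_generic_configure() - program the bridge from the previously fetched
 * current_size: record the bus address of the aperture BAR, write the
 * aperture size and the GATT base (AGPGARTLO), then set APERENB and GTLBEN.
 */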
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) int agp3_generic_configure(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct aper_size_info_16 *current_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) current_size = A_SIZE_16(agp_bridge->current_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) AGP_APERTURE_BAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* set aperture size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /* set gart pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) /* enable aperture and GTLB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) EXPORT_SYMBOL(agp3_generic_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) void agp3_generic_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u32 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) EXPORT_SYMBOL(agp3_generic_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
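/* { aperture size in MB, number of GATT entries, page order, AGPAPSIZE value } */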
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {4096, 1048576, 10, 0x000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {2048, 524288, 9, 0x800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {1024, 262144, 8, 0xc00},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) { 512, 131072, 7, 0xe00},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) { 256, 65536, 6, 0xf00},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) { 128, 32768, 5, 0xf20},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) { 64, 16384, 4, 0xf30},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) { 32, 8192, 3, 0xf38},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) { 16, 4096, 2, 0xf3c},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) { 8, 2048, 1, 0xf3e},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) { 4, 1024, 0, 0xf3f}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) EXPORT_SYMBOL(agp3_generic_sizes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)