// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) Intel Corp. 2007.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 * This file is part of the Vermilion Range fb driver.
 *
 * Authors:
 *   Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *   Michel Dänzer <michel-at-tungstengraphics-dot-com>
 *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <linux/mmzone.h>

/* #define VERMILION_DEBUG */

#include "vermilion.h"

#define MODULE_NAME "vmlfb"

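/*
 * Convert a 16-bit fbdev color component to a hardware field that is
 * _width bits wide, rounding to the nearest representable value.
 */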
#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

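/*
 * vml_mutex protects the subsys pointer and the two global lists below.
 * Each vml_info sits on global_has_mode or global_no_mode, depending on
 * whether a subsystem was available when its mode was last set
 * (see vmlfb_set_par()).
 */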
static struct mutex vml_mutex;
static struct list_head global_no_mode;
static struct list_head global_has_mode;
static struct fb_ops vmlfb_ops;
static struct vml_sys *subsys = NULL;
static char *vml_default_mode = "1024x768@60";
static const struct fb_videomode defaultmode = {
	NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
	0, FB_VMODE_NONINTERLACED
};

static u32 vml_mem_requested = (10 * 1024 * 1024);
static u32 vml_mem_contig = (4 * 1024 * 1024);
static u32 vml_mem_min = (4 * 1024 * 1024);

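/*
 * Pixel clock frequencies, in kHz, that the fallback clock selection in
 * vml_nearest_clock() can choose from when no subsystem override exists.
 */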
static u32 vml_clocks[] = {
	6750,
	13500,
	27000,
	29700,
	37125,
	54000,
	59400,
	74250,
	120000,
	148500
};

static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);

/*
 * Allocate a contiguous vram area and make its linear kernel map
 * uncached.
 */

static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;

	max_order++;
	do {
		/*
		 * Really try hard to get the needed memory.
		 * The hardware needs this memory below the first 32 MB,
		 * so use __GFP_DMA, which guarantees an allocation from
		 * the first 16 MB and thus satisfies that constraint.
		 */

		flags = __GFP_DMA | __GFP_HIGH | __GFP_KSWAPD_RECLAIM;
		va->logical =
			 __get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with fault mapping, so
	 * up the usage count once more (XXX: should use split_page or
	 * compound page).
	 */

	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 */
	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}

/*
 * Free a contiguous vram area and reset its linear kernel map
 * mapping type.
 */

static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (va->logical) {

		/*
		 * Reset the linear kernel map caching policy.
		 */

		set_pages_wb(virt_to_page(va->logical),
			     va->size >> PAGE_SHIFT);

		/*
		 * Decrease the usage count on the pages we've used
		 * to compensate for upping when allocating.
		 */

		for (j = va->logical; j < va->logical + va->size;
		     j += PAGE_SIZE) {
			(void)put_page_testzero(virt_to_page(j));
		}

		printk(KERN_DEBUG MODULE_NAME
		       ": Freeing %ld bytes vram area at 0x%08lx\n",
		       va->size, va->phys);
		free_pages(va->logical, va->order);

		va->logical = 0;
	}
}

/*
 * Free allocated vram.
 */

static void vmlfb_free_vram(struct vml_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		vmlfb_free_vram_area(&vinfo->vram[i]);
	}
	vinfo->num_areas = 0;
}

/*
 * Allocate vram. Currently we try to allocate contiguous areas from the
 * __GFP_DMA zone and puzzle them together. A better approach would be to
 * allocate one contiguous area for scanout and use one-page allocations for
 * offscreen areas. This requires user-space and GPU virtual mappings.
 */

static int vmlfb_alloc_vram(struct vml_info *vinfo,
			    size_t requested,
			    size_t min_total, size_t min_contig)
{
	int i, j;
	int order;
	int contiguous;
	int err;
	struct vram_area *va;
	struct vram_area *va2;

	vinfo->num_areas = 0;
	for (i = 0; i < VML_VRAM_AREAS; ++i) {
		va = &vinfo->vram[i];
		order = 0;

		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
			order++;

		err = vmlfb_alloc_vram_area(va, order, 0);

		if (err)
			break;

		if (i == 0) {
			vinfo->vram_start = va->phys;
			vinfo->vram_logical = (void __iomem *) va->logical;
			vinfo->vram_contig_size = va->size;
			vinfo->num_areas = 1;
		} else {
			contiguous = 0;

			for (j = 0; j < i; ++j) {
				va2 = &vinfo->vram[j];
				if (va->phys + va->size == va2->phys ||
				    va2->phys + va2->size == va->phys) {
					contiguous = 1;
					break;
				}
			}

			if (contiguous) {
				vinfo->num_areas++;
				if (va->phys < vinfo->vram_start) {
					vinfo->vram_start = va->phys;
					vinfo->vram_logical =
						(void __iomem *)va->logical;
				}
				vinfo->vram_contig_size += va->size;
			} else {
				vmlfb_free_vram_area(va);
				break;
			}
		}

		if (requested < va->size)
			break;
		else
			requested -= va->size;
	}

	if (vinfo->vram_contig_size > min_total &&
	    vinfo->vram_contig_size > min_contig) {

		printk(KERN_DEBUG MODULE_NAME
		       ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
		       (unsigned long)vinfo->vram_contig_size,
		       (unsigned long)vinfo->vram_start);

		return 0;
	}

	printk(KERN_ERR MODULE_NAME
	       ": Could not allocate requested minimal amount of vram.\n");

	vmlfb_free_vram(vinfo);

	return -ENOMEM;
}

/*
 * Find the GPU to use with our display controller.
 */

static int vmlfb_get_gpu(struct vml_par *par)
{
	mutex_lock(&vml_mutex);

	par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);

	if (!par->gpu) {
		mutex_unlock(&vml_mutex);
		return -ENODEV;
	}

	mutex_unlock(&vml_mutex);

	if (pci_enable_device(par->gpu) < 0)
		return -ENODEV;

	return 0;
}

/*
 * Check that a given offset from the start of vram falls within one of the
 * allocated, contiguous vram areas.
 */
static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
{
	unsigned long aoffset;
	unsigned i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);

		if (aoffset < vinfo->vram[i].size) {
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Remap the MMIO register spaces of the VDC and the GPU.
 */

static int vmlfb_enable_mmio(struct vml_par *par)
{
	int err;

	par->vdc_mem_base = pci_resource_start(par->vdc, 0);
	par->vdc_mem_size = pci_resource_len(par->vdc, 0);
	if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not claim display controller MMIO.\n");
		return -EBUSY;
	}
	par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
	if (par->vdc_mem == NULL) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not map display controller MMIO.\n");
		err = -ENOMEM;
		goto out_err_0;
	}

	par->gpu_mem_base = pci_resource_start(par->gpu, 0);
	par->gpu_mem_size = pci_resource_len(par->gpu, 0);
	if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
		err = -EBUSY;
		goto out_err_1;
	}
	par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
	if (par->gpu_mem == NULL) {
		printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
		err = -ENOMEM;
		goto out_err_2;
	}

	return 0;

out_err_2:
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
out_err_1:
	iounmap(par->vdc_mem);
out_err_0:
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
	return err;
}

/*
 * Unmap the VDC and GPU register spaces.
 */

static void vmlfb_disable_mmio(struct vml_par *par)
{
	iounmap(par->gpu_mem);
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
	iounmap(par->vdc_mem);
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
}

/*
 * Release and uninit the VDC and GPU.
 */

static void vmlfb_release_devices(struct vml_par *par)
{
	if (atomic_dec_and_test(&par->refcount)) {
		pci_disable_device(par->gpu);
		pci_disable_device(par->vdc);
	}
}

/*
 * Free up allocated resources for a device.
 */

static void vml_pci_remove(struct pci_dev *dev)
{
	struct fb_info *info;
	struct vml_info *vinfo;
	struct vml_par *par;

	info = pci_get_drvdata(dev);
	if (info) {
		vinfo = container_of(info, struct vml_info, info);
		par = vinfo->par;
		mutex_lock(&vml_mutex);
		unregister_framebuffer(info);
		fb_dealloc_cmap(&info->cmap);
		vmlfb_free_vram(vinfo);
		vmlfb_disable_mmio(par);
		vmlfb_release_devices(par);
		kfree(vinfo);
		kfree(par);
		mutex_unlock(&vml_mutex);
	}
}

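/*
 * Fill in the preferred pixel format for the given bit depth:
 * ARGB1555 for 16 bpp and xRGB8888 for 32 bpp.
 */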
static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
{
	switch (var->bits_per_pixel) {
	case 16:
		var->blue.offset = 0;
		var->blue.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->red.offset = 10;
		var->red.length = 5;
		var->transp.offset = 15;
		var->transp.length = 1;
		break;
	case 32:
		var->blue.offset = 0;
		var->blue.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->red.offset = 16;
		var->red.length = 8;
		var->transp.offset = 24;
		var->transp.length = 0;
		break;
	default:
		break;
	}

	var->blue.msb_right = var->green.msb_right =
	    var->red.msb_right = var->transp.msb_right = 0;
}

/*
 * Device initialization.
 * We initialize one vml_par struct per device and one vml_info
 * struct per pipe. Currently we have only one pipe.
 */

static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vml_info *vinfo;
	struct fb_info *info;
	struct vml_par *par;
	int err = 0;

	par = kzalloc(sizeof(*par), GFP_KERNEL);
	if (par == NULL)
		return -ENOMEM;

	vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
	if (vinfo == NULL) {
		err = -ENOMEM;
		goto out_err_0;
	}

	vinfo->par = par;
	par->vdc = dev;
	atomic_set(&par->refcount, 1);

	switch (id->device) {
	case VML_DEVICE_VDC:
		if ((err = vmlfb_get_gpu(par)))
			goto out_err_1;
		pci_set_drvdata(dev, &vinfo->info);
		break;
	default:
		err = -ENODEV;
		goto out_err_1;
	}

	info = &vinfo->info;
	info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;

	err = vmlfb_enable_mmio(par);
	if (err)
		goto out_err_2;

	err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
			       vml_mem_contig, vml_mem_min);
	if (err)
		goto out_err_3;

	strcpy(info->fix.id, "Vermilion Range");
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.smem_start = vinfo->vram_start;
	info->fix.smem_len = vinfo->vram_contig_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.ypanstep = 1;
	info->fix.xpanstep = 1;
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->screen_base = vinfo->vram_logical;
	info->pseudo_palette = vinfo->pseudo_palette;
	info->par = par;
	info->fbops = &vmlfb_ops;
	info->device = &dev->dev;

	INIT_LIST_HEAD(&vinfo->head);
	vinfo->pipe_disabled = 1;
	vinfo->cur_blank_mode = FB_BLANK_UNBLANK;

	info->var.grayscale = 0;
	info->var.bits_per_pixel = 16;
	vmlfb_set_pref_pixel_format(&info->var);

	if (!fb_find_mode
	    (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
		printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
	}

	if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
		err = -ENOMEM;
		goto out_err_4;
	}

	err = register_framebuffer(info);
	if (err) {
		printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
		goto out_err_5;
	}

	printk(KERN_INFO "Initialized vmlfb\n");

	return 0;

out_err_5:
	fb_dealloc_cmap(&info->cmap);
out_err_4:
	vmlfb_free_vram(vinfo);
out_err_3:
	vmlfb_disable_mmio(par);
out_err_2:
	vmlfb_release_devices(par);
out_err_1:
	kfree(vinfo);
out_err_0:
	kfree(par);
	return err;
}

static int vmlfb_open(struct fb_info *info, int user)
{
	/*
	 * Save registers here?
	 */
	return 0;
}

static int vmlfb_release(struct fb_info *info, int user)
{
	/*
	 * Restore registers here.
	 */

	return 0;
}

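/*
 * Snap a requested pixel clock (kHz) to the closest entry in vml_clocks[].
 */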
static int vml_nearest_clock(int clock)
{

	int i;
	int cur_index;
	int cur_diff;
	int diff;

	cur_index = 0;
	cur_diff = clock - vml_clocks[0];
	cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
	for (i = 1; i < vml_num_clocks; ++i) {
		diff = clock - vml_clocks[i];
		diff = (diff < 0) ? -diff : diff;
		if (diff < cur_diff) {
			cur_index = i;
			cur_diff = diff;
		}
	}
	return vml_clocks[cur_index];
}

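/*
 * Validate and, where possible, adjust a requested mode against the clock,
 * resolution and vram constraints. Called with vml_mutex held.
 */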
static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
				  struct vml_info *vinfo)
{
	u32 pitch;
	u64 mem;
	int nearest_clock;
	int clock;
	int clock_diff;
	struct fb_var_screeninfo v;

	v = *var;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys && subsys->nearest_clock) {
		nearest_clock = subsys->nearest_clock(subsys, clock);
	} else {
		nearest_clock = vml_nearest_clock(clock);
	}

	/*
	 * Accept a 20% diff.
	 */

	clock_diff = nearest_clock - clock;
	clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
	if (clock_diff > clock / 5) {
#if 0
		printk(KERN_DEBUG MODULE_NAME ": Diff failure. %d %d\n",clock_diff,clock);
#endif
		return -EINVAL;
	}

	v.pixclock = KHZ2PICOS(nearest_clock);

	if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
		printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
		return -EINVAL;
	}
	if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
		printk(KERN_DEBUG MODULE_NAME
		       ": Virtual resolution failure.\n");
		return -EINVAL;
	}
	switch (v.bits_per_pixel) {
	case 0 ... 16:
		v.bits_per_pixel = 16;
		break;
	case 17 ... 32:
		v.bits_per_pixel = 32;
		break;
	default:
		printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
		       var->bits_per_pixel);
		return -EINVAL;
	}

	pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
	mem = (u64)pitch * var->yres_virtual;
	if (mem > vinfo->vram_contig_size) {
		return -ENOMEM;
	}

	switch (v.bits_per_pixel) {
	case 16:
		if (var->blue.offset != 0 ||
		    var->blue.length != 5 ||
		    var->green.offset != 5 ||
		    var->green.length != 5 ||
		    var->red.offset != 10 ||
		    var->red.length != 5 ||
		    var->transp.offset != 15 || var->transp.length != 1) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	case 32:
		if (var->blue.offset != 0 ||
		    var->blue.length != 8 ||
		    var->green.offset != 8 ||
		    var->green.length != 8 ||
		    var->red.offset != 16 ||
		    var->red.length != 8 ||
		    (var->transp.length != 0 && var->transp.length != 8) ||
		    (var->transp.length == 8 && var->transp.offset != 24)) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	default:
		return -EINVAL;
	}

	*var = v;

	return 0;
}

static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	ret = vmlfb_check_var_locked(var, vinfo);
	mutex_unlock(&vml_mutex);

	return ret;
}

static void vml_wait_vblank(struct vml_info *vinfo)
{
	/* Wait for vblank. For now, just wait for a 50 Hz cycle (20 ms). */
	mdelay(20);
}

static void vmlfb_disable_pipe(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	/* Disable the MDVO pad */
	VML_WRITE32(par, VML_RCOMPSTAT, 0);
	while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;

	/* Disable display planes */
	VML_WRITE32(par, VML_DSPCCNTR,
		    VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
	(void)VML_READ32(par, VML_DSPCCNTR);
	/* Wait for vblank for the disable to take effect */
	vml_wait_vblank(vinfo);

	/* Next, disable display pipes */
	VML_WRITE32(par, VML_PIPEACONF, 0);
	(void)VML_READ32(par, VML_PIPEACONF);

	vinfo->pipe_disabled = 1;
}

#ifdef VERMILION_DEBUG
static void vml_dump_regs(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
	printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSIZE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCPOS));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPARB : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPARB));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCADDR));
	printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_BCLRPAT_A));
	printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_CANVSCLR_A));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEASRC));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEACONF));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCCNTR));
	printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_RCOMPSTAT));
	printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
}
#endif

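/*
 * Program the display pipe for the mode in info->var and re-enable it.
 * Called with vml_mutex held. Until a subsystem has been registered this
 * only updates the pixel size, stride and line length.
 */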
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static int vmlfb_set_par_locked(struct vml_info *vinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct vml_par *par = vinfo->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct fb_info *info = &vinfo->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct fb_var_screeninfo *var = &info->var;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) u32 dspcntr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) int clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) info->fix.line_length = vinfo->stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!subsys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) htotal =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) var->xres + var->right_margin + var->hsync_len + var->left_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) hactive = var->xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) hblank_start = var->xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) hblank_end = htotal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) hsync_start = hactive + var->right_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) hsync_end = hsync_start + var->hsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) vtotal =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) vactive = var->yres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) vblank_start = var->yres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) vblank_end = vtotal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) vsync_start = vactive + var->lower_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) vsync_end = vsync_start + var->vsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) dspcntr = VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) clock = PICOS2KHZ(var->pixclock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (subsys->nearest_clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) clock = subsys->nearest_clock(subsys, clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) clock = vml_nearest_clock(clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) printk(KERN_DEBUG MODULE_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ((clock / htotal) * 1000) / vtotal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) switch (var->bits_per_pixel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) dspcntr |= VML_GFX_ARGB1555;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (var->transp.length == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dspcntr |= VML_GFX_RGB0888;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) vmlfb_disable_pipe(vinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (subsys->set_clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) subsys->set_clock(subsys, clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) VML_WRITE32(par, VML_HBLANK_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ((hblank_end - 1) << 16) | (hblank_start - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) VML_WRITE32(par, VML_HSYNC_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ((hsync_end - 1) << 16) | (hsync_start - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) VML_WRITE32(par, VML_VBLANK_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) ((vblank_end - 1) << 16) | (vblank_start - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) VML_WRITE32(par, VML_VSYNC_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ((vsync_end - 1) << 16) | (vsync_start - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) VML_WRITE32(par, VML_DSPCSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ((var->yres - 1) << 16) | (var->xres - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) VML_WRITE32(par, VML_PIPEASRC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ((var->xres - 1) << 16) | (var->yres - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
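	/*
	 * Enable the pipe, then the plane, and finally point the plane
	 * at the framebuffer.  The write barriers keep this ordering
	 * visible to the hardware.
	 */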
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) var->yoffset * vinfo->stride +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) var->xoffset * vinfo->bytes_per_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
	/* Busy-wait until RCOMP completes or the MDVO pad enable takes effect. */
	while (!(VML_READ32(par, VML_RCOMPSTAT) &
		 (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE)))
		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) vinfo->pipe_disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) #ifdef VERMILION_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) vml_dump_regs(vinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
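/*
 * fb_set_par callback.  Takes the global mutex, moves this device onto
 * the has-mode or no-mode list depending on whether a subsystem is
 * registered, and programs the hardware.
 */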
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) static int vmlfb_set_par(struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct vml_info *vinfo = container_of(info, struct vml_info, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) mutex_lock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ret = vmlfb_set_par_locked(vinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
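/*
 * Apply the current blank mode with vml_mutex held: unblank and normal
 * re-enable the pipe if needed and clear or set the forced border,
 * while the suspend and powerdown modes simply disable the pipe.
 */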
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static int vmlfb_blank_locked(struct vml_info *vinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct vml_par *par = vinfo->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) u32 cur = VML_READ32(par, VML_PIPEACONF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) switch (vinfo->cur_blank_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) case FB_BLANK_UNBLANK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (vinfo->pipe_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) vmlfb_set_par_locked(vinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) (void)VML_READ32(par, VML_PIPEACONF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) case FB_BLANK_NORMAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (vinfo->pipe_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) vmlfb_set_par_locked(vinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) (void)VML_READ32(par, VML_PIPEACONF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
	case FB_BLANK_POWERDOWN:
		if (!vinfo->pipe_disabled)
			vmlfb_disable_pipe(vinfo);
		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
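/* fb_blank callback: record the requested mode and apply it under the mutex. */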
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static int vmlfb_blank(int blank_mode, struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct vml_info *vinfo = container_of(info, struct vml_info, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) mutex_lock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) vinfo->cur_blank_mode = blank_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ret = vmlfb_blank_locked(vinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
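/*
 * fb_pan_display callback.  Re-points the plane base address at the
 * requested x/y offset; the dummy read flushes the posted write.
 */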
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) static int vmlfb_pan_display(struct fb_var_screeninfo *var,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct vml_info *vinfo = container_of(info, struct vml_info, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct vml_par *par = vinfo->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) mutex_lock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) var->yoffset * vinfo->stride +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) var->xoffset * vinfo->bytes_per_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) (void)VML_READ32(par, VML_DSPCADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
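/*
 * fb_setcolreg callback.  Only truecolor visuals are supported, so this
 * just fills the 16-entry pseudo palette used by the cfb_* drawing helpers.
 */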
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) u_int transp, struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (regno >= 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (info->var.grayscale) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (info->fix.visual != FB_VISUAL_TRUECOLOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) red = VML_TOHW(red, info->var.red.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) blue = VML_TOHW(blue, info->var.blue.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) green = VML_TOHW(green, info->var.green.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) transp = VML_TOHW(transp, info->var.transp.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) v = (red << info->var.red.offset) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) (green << info->var.green.offset) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) (blue << info->var.blue.offset) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) (transp << info->var.transp.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
	switch (info->var.bits_per_pixel) {
	case 16:
	case 24:
	case 32:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
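/*
 * fb_mmap callback.  Maps the contiguous part of the framebuffer into
 * user space with an uncached-minus memory type.
 */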
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct vml_info *vinfo = container_of(info, struct vml_info, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) unsigned long prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ret = vmlfb_vram_offset(vinfo, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) pgprot_val(vma->vm_page_prot) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return vm_iomap_memory(vma, vinfo->vram_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) vinfo->vram_contig_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
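/* No accelerator to wait for; drawing is done by the CPU via the cfb_* helpers. */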
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static int vmlfb_sync(struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -EINVAL; /* just to force soft_cursor() call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static struct fb_ops vmlfb_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) .fb_open = vmlfb_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) .fb_release = vmlfb_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) .fb_check_var = vmlfb_check_var,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) .fb_set_par = vmlfb_set_par,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) .fb_blank = vmlfb_blank,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) .fb_pan_display = vmlfb_pan_display,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) .fb_fillrect = cfb_fillrect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) .fb_copyarea = cfb_copyarea,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) .fb_imageblit = cfb_imageblit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) .fb_cursor = vmlfb_cursor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) .fb_sync = vmlfb_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) .fb_mmap = vmlfb_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) .fb_setcolreg = vmlfb_setcolreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static const struct pci_device_id vml_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {0}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static struct pci_driver vmlfb_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .name = "vmlfb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .id_table = vml_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .probe = vml_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) .remove = vml_pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void __exit vmlfb_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) pci_unregister_driver(&vmlfb_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
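/*
 * Module initialization: when built in, respect the video= command line
 * options, then set up the global lists and register the PCI driver.
 */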
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static int __init vmlfb_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) #ifndef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) char *option = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (fb_get_options(MODULE_NAME, &option))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) printk(KERN_DEBUG MODULE_NAME ": initializing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) mutex_init(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) INIT_LIST_HEAD(&global_no_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) INIT_LIST_HEAD(&global_has_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return pci_register_driver(&vmlfb_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
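/*
 * Called by a system-specific module (which provides the pixel clock and
 * state save/restore hooks) to attach itself.  Any previously registered
 * subsystem is restored, the new one is saved, and every device waiting
 * on the no-mode list is given its current mode or, failing that, the
 * default mode.
 */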
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) int vmlfb_register_subsys(struct vml_sys *sys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct vml_info *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct list_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u32 save_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) mutex_lock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (subsys != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) subsys->restore(subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) subsys = sys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) subsys->save(subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * We need to restart list traversal for each item, since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * release the list mutex in the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) list = global_no_mode.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) while (list != &global_no_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) list_del_init(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) entry = list_entry(list, struct vml_info, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * First, try the current mode which might not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * completely validated with respect to the pixel clock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) vmlfb_set_par_locked(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) list_add_tail(list, &global_has_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
			/*
			 * That didn't work. Try to find another mode
			 * that matches this subsystem.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) save_activate = entry->info.var.activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) entry->info.var.bits_per_pixel = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) vmlfb_set_pref_pixel_format(&entry->info.var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (fb_find_mode(&entry->info.var,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) &entry->info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) vml_default_mode, NULL, 0, NULL, 16)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) entry->info.var.activate |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) fb_set_var(&entry->info, &entry->info.var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) } else {
				printk(KERN_ERR MODULE_NAME
				       ": no suitable mode found for this subsystem.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) entry->info.var.activate = save_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mutex_lock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) vmlfb_blank_locked(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) list = global_no_mode.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) subsys->name ? subsys->name : "unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) EXPORT_SYMBOL_GPL(vmlfb_register_subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
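/*
 * Detach a previously registered subsystem: restore its saved state,
 * disable the pipe on every active device and move those devices back
 * to the no-mode list.
 */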
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) void vmlfb_unregister_subsys(struct vml_sys *sys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct vml_info *entry, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) mutex_lock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (subsys != sys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) subsys->restore(subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) subsys = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) list_for_each_entry_safe(entry, next, &global_has_mode, head) {
		printk(KERN_DEBUG MODULE_NAME ": subsystem unregistered, disabling pipe\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) vmlfb_disable_pipe(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) list_move_tail(&entry->head, &global_no_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) mutex_unlock(&vml_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) module_init(vmlfb_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) module_exit(vmlfb_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) MODULE_AUTHOR("Tungsten Graphics");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) MODULE_VERSION("1.0.0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) MODULE_LICENSE("GPL");