Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *  EMU10K1 memory page allocation (PTB area)
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>

/* The page arguments of these two macros are Emu pages (4096 bytes),
 * not the aligned kernel pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
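/*
 * Illustrative example: with emu->address_mode == 1, __set_ptb_entry(emu, 5,
 * 0x1234000) stores cpu_to_le32((0x1234000 << 1) | 5) == cpu_to_le32(0x2468005)
 * into PTB slot 5, and __get_ptb_entry(emu, 5) reads the same value back.
 */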

#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0		(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1		(MAXPAGES1 / UNIT_PAGES)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
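/*
 * Worked example (assuming 4 KiB kernel pages, PAGE_SHIFT == 12):
 * get_aligned_page(0x3400) == 3 and aligned_page_offset(3) == 0x3000,
 * i.e. an offset is truncated down to its containing aligned page.
 */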

#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill PTB entry (or entries) corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill PTB entry (or entries) corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}
static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;
	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment ptr */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */
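/*
 * Illustrative example for the non-trivial branch above (assuming a kernel
 * configured with 16 KiB pages, so UNIT_PAGES == 4): set_ptb_entry(emu, 3,
 * addr) fills PTB entries 12..15 with addr, addr + 4 KiB, addr + 8 KiB and
 * addr + 12 KiB respectively.
 */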


/*
 */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)


/* initialize emu10k1 part */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}

/*
 * search for an empty region on the PTB with the given size
 *
 * if an empty region is found, return its first page and store the next
 * mapped block in nextp
 * if not found, return a negative error code.
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		}
		else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
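/*
 * Worked example: with blocks already mapped at PTB pages 1-4 and 10-12, a
 * request for npages == 5 finds the hole starting at page 5 (10 - 5 == 5,
 * an exact fit), returns 5 and sets *nextp to the second block, so the
 * caller can insert the new block right before it.
 */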

/*
 * map a memory block onto emu10k1's PTB
 *
 * call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}

/*
 * unmap the block
 * return the size of the resultant empty region, in pages
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	if ((p = blk->mapped_link.prev) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else
		start_page = 1;
	if ((p = blk->mapped_link.next) != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);

	/* remove links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}

/*
 * search for empty pages of the given size, and create a memory block
 *
 * unlike synth_alloc, the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
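/*
 * Example of the size rounding above (assuming 4 KiB pages): a request of
 * size == 10000 gives psize == get_aligned_page(10000 + 4095) == 3 aligned
 * pages, i.e. 12288 bytes reserved starting on a page boundary.
 */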


/*
 * check if the given pointer is valid for pages
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}

/*
 * map the given memory block on PTB.
 * if the block is already mapped, update the link order.
 * if no empty pages are found, try to release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	if ((err = map_memblk(emu, blk)) < 0) {
		/* not enough pages - try to unmap some blocks */
		/* starting from the oldest block */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* ok, the empty region is large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);

/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
					(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill buffer addresses but pointers are not stored so that
	 * snd_free_pci_page() is not called in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;
		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (! is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
				"emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}


/*
 * release DMA buffer from page table
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}

/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for why
 * this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also needs
 * changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		size_t size_real = npages * PAGE_SIZE;

		/*
		 * The device has been observed to access up to 256 extra
		 * bytes, but use 1k to be safe.
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   &emu->pci->dev, size, dmab);
}
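/*
 * Widening example (assuming 4 KiB pages and emu->iommu_workaround set):
 * a 65000-byte request rounds up to 16 pages (65536 bytes); since
 * 65536 < 65000 + 1024, one extra PAGE_SIZE is added and
 * snd_dma_alloc_pages() is asked for 69096 bytes instead.
 */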

/*
 * memory allocation using multiple pages (for synth)
 * Unlike the DMA allocation above, non-contiguous pages are assigned.
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);

/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);

/* check new allocation range */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;
	first_page = blk->first_page;
	if ((p = blk->mem.list.prev) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++;  /* first page was already allocated */
	}
	last_page = blk->last_page;
	if ((p = blk->mem.list.next) != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
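/*
 * Illustrative example: if the previous block in the list ends on aligned
 * page 7 and blk->first_page is also 7, that shared page is already backed,
 * so *first_page_ret becomes 8 and only pages 8..last_page are allocated or
 * freed by the callers.
 */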

/* release allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = &emu->pci->dev;

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/*
		 * please keep me in sync with logic in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}

/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release allocated pages */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);

	return -ENOMEM;
}

/*
 * free pages
 */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;

	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}

/* calculate buffer pointer from offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (! ptr) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void*)ptr;
}

/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);

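/*
 * Worked example for the page-splitting loop used by
 * snd_emu10k1_synth_bzero() above and snd_emu10k1_synth_copy_from_user()
 * below (assuming 4 KiB pages and a page-aligned blk->offset): a 6000-byte
 * operation starting at offset 3000 spans three kernel pages and is split
 * into chunks of 1096, 4096 and 808 bytes.
 */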
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);