Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * smscufx.c -- Framebuffer driver for SMSC UFX USB controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2011 Steve Glendinning <steve.glendinning@shawell.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * Based on udlfb, with work from Florian Echtler, Henrik Bjerregaard Pedersen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * and others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * Works well with Bernie Thompson's X DAMAGE patch to xf86-video-fbdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * available from http://git.plugable.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * usb-skeleton by GregKH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/usb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/fb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "edid.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) 
/*
 * Error-logging helpers: each evaluates 'status' and, when negative,
 * logs a warning with pr_warn().  Note that check_warn_return and
 * check_warn_goto_error hide a 'return' / 'goto error' inside a GCC
 * statement expression, so they alter control flow of the CALLER --
 * only use them directly inside a function body.
 */
#define check_warn(status, fmt, args...) \
	({ if (status < 0) pr_warn(fmt, ##args); })

/* As check_warn, but also returns 'status' from the enclosing function. */
#define check_warn_return(status, fmt, args...) \
	({ if (status < 0) { pr_warn(fmt, ##args); return status; } })

/* As check_warn, but jumps to the enclosing function's 'error' label. */
#define check_warn_goto_error(status, fmt, args...) \
	({ if (status < 0) { pr_warn(fmt, ##args); goto error; } })

/* true iff every bit of 'bits' is set in 'x' */
#define all_bits_set(x, bits) (((x) & (bits)) == (bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
/* Vendor-specific USB control requests for register access */
#define USB_VENDOR_REQUEST_WRITE_REGISTER	0xA0
#define USB_VENDOR_REQUEST_READ_REGISTER	0xA1

/*
 * TODO: Propose standard fb.h ioctl for reporting damage,
 * using _IOWR() and one of the existing area structs from fb.h
 * Consider these ioctls deprecated, but they're still used by the
 * DisplayLink X server as yet - need both to be modified in tandem
 * when new ioctl(s) are ready.
 */
#define UFX_IOCTL_RETURN_EDID	(0xAD)
#define UFX_IOCTL_REPORT_DAMAGE	(0xAA)

/* BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE		(512)
#define MAX_TRANSFER		(PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT	(4)  /* number of URBs kept in the pool */

/* timeouts (jiffies) for acquiring / draining URBs from the pool */
#define GET_URB_TIMEOUT		(HZ)
#define FREE_URB_TIMEOUT	(HZ*2)

#define BPP			2  /* bytes per pixel (16bpp framebuffer) */

#define UFX_DEFIO_WRITE_DELAY	5 /* fb_deferred_io.delay in jiffies */
#define UFX_DEFIO_WRITE_DISABLE	(HZ*60) /* "disable" with long delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
/*
 * Damage rectangle passed in by userspace via UFX_IOCTL_REPORT_DAMAGE.
 * Layout is ABI shared with the DisplayLink X server -- do not change.
 */
struct dloarea {
	int x, y;	/* top-left corner, in pixels */
	int w, h;	/* width and height, in pixels */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
/* One pre-allocated URB, linked into a ufx_data's urb_list pool. */
struct urb_node {
	struct list_head entry;			/* link in urb_list.list */
	struct ufx_data *dev;			/* owning device */
	struct delayed_work release_urb_work;	/* deferred teardown of this URB */
	struct urb *urb;			/* the underlying USB request block */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
/*
 * Pool of pre-allocated bulk URBs.  'lock' guards 'list'; 'limit_sem'
 * throttles producers so no more than 'count' transfers are in flight.
 */
struct urb_list {
	struct list_head list;		/* free URBs, as urb_node entries */
	spinlock_t lock;		/* protects 'list' and 'available' */
	struct semaphore limit_sem;	/* counts free URBs; blocks when exhausted */
	int available;			/* current number of free URBs */
	int count;			/* total URBs allocated for this pool */
	size_t size;			/* transfer buffer size of each URB */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 
/* Per-device driver state shared between the USB and framebuffer layers. */
struct ufx_data {
	struct usb_device *udev;
	struct device *gdev; /* &udev->dev */
	struct fb_info *info;
	struct urb_list urbs;	/* pool of bulk URBs for pixel transfers */
	struct kref kref;	/* lifetime refcount for this structure */
	int fb_count;		/* number of open framebuffer users */
	bool virtualized; /* true when physical usb device not present */
	struct delayed_work free_framebuffer_work;
	atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
	atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
	u8 *edid; /* null until we read edid from hw or get from sysfs */
	size_t edid_size;
	u32 pseudo_palette[256]; /* palette for fbcon truecolor emulation */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
/*
 * fb_fix_screeninfo template copied into each fb_info at probe time:
 * packed-pixel truecolor with no panning/wrapping and no acceleration.
 */
static struct fb_fix_screeninfo ufx_fix = {
	.id =           "smscufx",
	.type =         FB_TYPE_PACKED_PIXELS,
	.visual =       FB_VISUAL_TRUECOLOR,
	.xpanstep =     0,
	.ypanstep =     0,
	.ywrapstep =    0,
	.accel =        FB_ACCEL_NONE,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 
/*
 * fb_info flags: the framebuffer is a virtual buffer in system RAM
 * (FBINFO_VIRTFB, fast to read back); the HWACCEL_* bits route blit /
 * fill / copy through our handlers so damage can be tracked.
 */
static const u32 smscufx_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
	FBINFO_VIRTFB |	FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
	FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
/* Supported devices: two SMSC (vendor 0x0424) UFX controller variants. */
static const struct usb_device_id id_table[] = {
	{USB_DEVICE(0x0424, 0x9d00),},
	{USB_DEVICE(0x0424, 0x9d01),},
	{},	/* terminator */
};
MODULE_DEVICE_TABLE(usb, id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
/* module options (presumably registered via module_param later in the file) */
static bool console;   /* Optionally allow fbcon to consume first framebuffer */
static bool fb_defio = true;  /* Optionally enable fb_defio mmap support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
/* ufx keeps a list of urbs for efficient bulk transfers */
/* (forward declarations; the URB pool implementation lives further down) */
static void ufx_urb_completion(struct urb *urb);
static struct urb *ufx_get_urb(struct ufx_data *dev);
static int ufx_submit_urb(struct ufx_data *dev, struct urb * urb, size_t len);
static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size);
static void ufx_free_urb_list(struct ufx_data *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) /* reads a control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) static int ufx_reg_read(struct ufx_data *dev, u32 index, u32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	u32 *buf = kmalloc(4, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	BUG_ON(!dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		USB_VENDOR_REQUEST_READ_REGISTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 		00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	le32_to_cpus(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	*data = *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 		pr_warn("Failed to read register index 0x%08x\n", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) /* writes a control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) static int ufx_reg_write(struct ufx_data *dev, u32 index, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	u32 *buf = kmalloc(4, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	BUG_ON(!dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	*buf = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	cpu_to_le32s(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 		USB_VENDOR_REQUEST_WRITE_REGISTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 		pr_warn("Failed to write register index 0x%08x with value "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 			"0x%08x\n", index, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) static int ufx_reg_clear_and_set_bits(struct ufx_data *dev, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 	u32 bits_to_clear, u32 bits_to_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	int status = ufx_reg_read(dev, index, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	check_warn_return(status, "ufx_reg_clear_and_set_bits error reading "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 		"0x%x", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	data &= (~bits_to_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	data |= bits_to_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	status = ufx_reg_write(dev, index, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	check_warn_return(status, "ufx_reg_clear_and_set_bits error writing "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 		"0x%x", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
/* Sets the given bits in a register, leaving all others untouched. */
static int ufx_reg_set_bits(struct ufx_data *dev, u32 index, u32 bits)
{
	return ufx_reg_clear_and_set_bits(dev, index, 0, bits);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 
/* Clears the given bits in a register, leaving all others untouched. */
static int ufx_reg_clear_bits(struct ufx_data *dev, u32 index, u32 bits)
{
	return ufx_reg_clear_and_set_bits(dev, index, bits, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) static int ufx_lite_reset(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	status = ufx_reg_write(dev, 0x3008, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	check_warn_return(status, "ufx_lite_reset error writing 0x3008");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	status = ufx_reg_read(dev, 0x3008, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	check_warn_return(status, "ufx_lite_reset error reading 0x3008");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	return (value == 0) ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) /* If display is unblanked, then blank it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) static int ufx_blank(struct ufx_data *dev, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	u32 dc_ctrl, dc_sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	check_warn_return(status, "ufx_blank error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	check_warn_return(status, "ufx_blank error reading 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	/* return success if display is already blanked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	if ((dc_sts & 0x00000100) || (dc_ctrl & 0x00000100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	/* request the DC to blank the display */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	dc_ctrl |= 0x00000100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	check_warn_return(status, "ufx_blank error writing 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	/* return success immediately if we don't have to wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	if (!wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	for (i = 0; i < 250; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 		status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 		check_warn_return(status, "ufx_blank error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		if (dc_sts & 0x00000100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	/* timed out waiting for display to blank */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) /* If display is blanked, then unblank it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) static int ufx_unblank(struct ufx_data *dev, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	u32 dc_ctrl, dc_sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	check_warn_return(status, "ufx_unblank error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	check_warn_return(status, "ufx_unblank error reading 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	/* return success if display is already unblanked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	if (((dc_sts & 0x00000100) == 0) || ((dc_ctrl & 0x00000100) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	/* request the DC to unblank the display */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	dc_ctrl &= ~0x00000100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	check_warn_return(status, "ufx_unblank error writing 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	/* return success immediately if we don't have to wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	if (!wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	for (i = 0; i < 250; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 		check_warn_return(status, "ufx_unblank error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		if ((dc_sts & 0x00000100) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	/* timed out waiting for display to unblank */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) /* If display is enabled, then disable it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) static int ufx_disable(struct ufx_data *dev, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	u32 dc_ctrl, dc_sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	check_warn_return(status, "ufx_disable error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	check_warn_return(status, "ufx_disable error reading 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	/* return success if display is already disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	if (((dc_sts & 0x00000001) == 0) || ((dc_ctrl & 0x00000001) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	/* request the DC to disable the display */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	dc_ctrl &= ~(0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	check_warn_return(status, "ufx_disable error writing 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	/* return success immediately if we don't have to wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	if (!wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	for (i = 0; i < 250; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 		status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		check_warn_return(status, "ufx_disable error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		if ((dc_sts & 0x00000001) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	/* timed out waiting for display to disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) /* If display is disabled, then enable it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) static int ufx_enable(struct ufx_data *dev, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	u32 dc_ctrl, dc_sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	int status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	check_warn_return(status, "ufx_enable error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	status = ufx_reg_read(dev, 0x2000, &dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	check_warn_return(status, "ufx_enable error reading 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	/* return success if display is already enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	if ((dc_sts & 0x00000001) || (dc_ctrl & 0x00000001))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 	/* request the DC to enable the display */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	dc_ctrl |= 0x00000001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	status = ufx_reg_write(dev, 0x2000, dc_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	check_warn_return(status, "ufx_enable error writing 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	/* return success immediately if we don't have to wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	if (!wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	for (i = 0; i < 250; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 		status = ufx_reg_read(dev, 0x2004, &dc_sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		check_warn_return(status, "ufx_enable error reading 0x2004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 		if (dc_sts & 0x00000001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	/* timed out waiting for display to enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
/*
 * Programs the system clock PLL (register block 0x70xx).  The exact
 * divider meanings are undocumented here; values match the vendor
 * bring-up sequence.  Returns 0 on success or a negative errno.
 */
static int ufx_config_sys_clk(struct ufx_data *dev)
{
	int ret;

	ret = ufx_reg_write(dev, 0x700C, 0x8000000F);
	check_warn_return(ret, "error writing 0x700C");

	ret = ufx_reg_write(dev, 0x7014, 0x0010024F);
	check_warn_return(ret, "error writing 0x7014");

	ret = ufx_reg_write(dev, 0x7010, 0x00000000);
	check_warn_return(ret, "error writing 0x7010");

	ret = ufx_reg_clear_bits(dev, 0x700C, 0x0000000A);
	check_warn_return(ret, "error clearing PLL1 bypass in 0x700C");

	/* brief settle time before ungating the clock output */
	msleep(1);

	ret = ufx_reg_clear_bits(dev, 0x700C, 0x80000000);
	check_warn_return(ret, "error clearing output gate in 0x700C");

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) static int ufx_config_ddr2(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	int status, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	status = ufx_reg_write(dev, 0x0004, 0x001F0F77);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	check_warn_return(status, "error writing 0x0004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 	status = ufx_reg_write(dev, 0x0008, 0xFFF00000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	check_warn_return(status, "error writing 0x0008");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 	status = ufx_reg_write(dev, 0x000C, 0x0FFF2222);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	check_warn_return(status, "error writing 0x000C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	status = ufx_reg_write(dev, 0x0010, 0x00030814);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	check_warn_return(status, "error writing 0x0010");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	status = ufx_reg_write(dev, 0x0014, 0x00500019);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	check_warn_return(status, "error writing 0x0014");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	status = ufx_reg_write(dev, 0x0018, 0x020D0F15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	check_warn_return(status, "error writing 0x0018");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	status = ufx_reg_write(dev, 0x001C, 0x02532305);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	check_warn_return(status, "error writing 0x001C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	status = ufx_reg_write(dev, 0x0020, 0x0B030905);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	check_warn_return(status, "error writing 0x0020");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	status = ufx_reg_write(dev, 0x0024, 0x00000827);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	check_warn_return(status, "error writing 0x0024");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	status = ufx_reg_write(dev, 0x0028, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	check_warn_return(status, "error writing 0x0028");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	status = ufx_reg_write(dev, 0x002C, 0x00000042);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	check_warn_return(status, "error writing 0x002C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	status = ufx_reg_write(dev, 0x0030, 0x09520000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	check_warn_return(status, "error writing 0x0030");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	status = ufx_reg_write(dev, 0x0034, 0x02223314);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	check_warn_return(status, "error writing 0x0034");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	status = ufx_reg_write(dev, 0x0038, 0x00430043);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	check_warn_return(status, "error writing 0x0038");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	status = ufx_reg_write(dev, 0x003C, 0xF00F000F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	check_warn_return(status, "error writing 0x003C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	status = ufx_reg_write(dev, 0x0040, 0xF380F00F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	check_warn_return(status, "error writing 0x0040");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	status = ufx_reg_write(dev, 0x0044, 0xF00F0496);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	check_warn_return(status, "error writing 0x0044");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	status = ufx_reg_write(dev, 0x0048, 0x03080406);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	check_warn_return(status, "error writing 0x0048");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	status = ufx_reg_write(dev, 0x004C, 0x00001000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	check_warn_return(status, "error writing 0x004C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	status = ufx_reg_write(dev, 0x005C, 0x00000007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	check_warn_return(status, "error writing 0x005C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	status = ufx_reg_write(dev, 0x0100, 0x54F00012);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	check_warn_return(status, "error writing 0x0100");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	status = ufx_reg_write(dev, 0x0104, 0x00004012);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	check_warn_return(status, "error writing 0x0104");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	status = ufx_reg_write(dev, 0x0118, 0x40404040);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	check_warn_return(status, "error writing 0x0118");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	status = ufx_reg_write(dev, 0x0000, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	check_warn_return(status, "error writing 0x0000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	while (i++ < 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 		status = ufx_reg_read(dev, 0x0000, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		check_warn_return(status, "error reading 0x0000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		if (all_bits_set(tmp, 0xC0000000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	pr_err("DDR2 initialisation timed out, reg 0x0000=0x%08x", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) struct pll_values {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	u32 div_r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	u32 div_f0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	u32 div_q0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	u32 range0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	u32 div_r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	u32 div_f1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	u32 div_q1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	u32 range1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) static u32 ufx_calc_range(u32 ref_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	if (ref_freq >= 88000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		return 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	if (ref_freq >= 54000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		return 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	if (ref_freq >= 34000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		return 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	if (ref_freq >= 21000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	if (ref_freq >= 13000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	if (ref_freq >= 8000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) /* calculates PLL divider settings for a desired target frequency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) static void ufx_calc_pll_values(const u32 clk_pixel_pll, struct pll_values *asic_pll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	const u32 ref_clk = 25000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	u32 div_r0, div_f0, div_q0, div_r1, div_f1, div_q1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	u32 min_error = clk_pixel_pll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	for (div_r0 = 1; div_r0 <= 32; div_r0++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		u32 ref_freq0 = ref_clk / div_r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		if (ref_freq0 < 5000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		if (ref_freq0 > 200000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		for (div_f0 = 1; div_f0 <= 256; div_f0++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 			u32 vco_freq0 = ref_freq0 * div_f0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			if (vco_freq0 < 350000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			if (vco_freq0 > 700000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 			for (div_q0 = 0; div_q0 < 7; div_q0++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 				u32 pllout_freq0 = vco_freq0 / (1 << div_q0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 				if (pllout_freq0 < 5000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 				if (pllout_freq0 > 200000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 				for (div_r1 = 1; div_r1 <= 32; div_r1++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 					u32 ref_freq1 = pllout_freq0 / div_r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 					if (ref_freq1 < 5000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 					for (div_f1 = 1; div_f1 <= 256; div_f1++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 						u32 vco_freq1 = ref_freq1 * div_f1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 						if (vco_freq1 < 350000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 							continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 						if (vco_freq1 > 700000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 							break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 						for (div_q1 = 0; div_q1 < 7; div_q1++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 							u32 pllout_freq1 = vco_freq1 / (1 << div_q1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 							int error = abs(pllout_freq1 - clk_pixel_pll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 							if (pllout_freq1 < 5000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 								break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 							if (pllout_freq1 > 700000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 								continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 							if (error < min_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 								min_error = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 								/* final returned value is equal to calculated value - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 								 * because a value of 0 = divide by 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 								asic_pll->div_r0 = div_r0 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 								asic_pll->div_f0 = div_f0 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 								asic_pll->div_q0 = div_q0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 								asic_pll->div_r1 = div_r1 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 								asic_pll->div_f1 = div_f1 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 								asic_pll->div_q1 = div_q1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 								asic_pll->range0 = ufx_calc_range(ref_freq0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 								asic_pll->range1 = ufx_calc_range(ref_freq1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 								if (min_error == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 									return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 							}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 						}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) /* sets analog bit PLL configuration values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) static int ufx_config_pix_clk(struct ufx_data *dev, u32 pixclock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	struct pll_values asic_pll = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	u32 value, clk_pixel, clk_pixel_pll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	/* convert pixclock (in ps) to frequency (in Hz) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	clk_pixel = PICOS2KHZ(pixclock) * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	pr_debug("pixclock %d ps = clk_pixel %d Hz", pixclock, clk_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	/* clk_pixel = 1/2 clk_pixel_pll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	clk_pixel_pll = clk_pixel * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	ufx_calc_pll_values(clk_pixel_pll, &asic_pll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	/* Keep BYPASS and RESET signals asserted until configured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	status = ufx_reg_write(dev, 0x7000, 0x8000000F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	check_warn_return(status, "error writing 0x7000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	value = (asic_pll.div_f1 | (asic_pll.div_r1 << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		(asic_pll.div_q1 << 16) | (asic_pll.range1 << 20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	status = ufx_reg_write(dev, 0x7008, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	check_warn_return(status, "error writing 0x7008");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	value = (asic_pll.div_f0 | (asic_pll.div_r0 << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		(asic_pll.div_q0 << 16) | (asic_pll.range0 << 20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	status = ufx_reg_write(dev, 0x7004, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	check_warn_return(status, "error writing 0x7004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	status = ufx_reg_clear_bits(dev, 0x7000, 0x00000005);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	check_warn_return(status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		"error clearing PLL0 bypass bits in 0x7000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	status = ufx_reg_clear_bits(dev, 0x7000, 0x0000000A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	check_warn_return(status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		"error clearing PLL1 bypass bits in 0x7000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	status = ufx_reg_clear_bits(dev, 0x7000, 0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	check_warn_return(status, "error clearing gate bits in 0x7000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) static int ufx_set_vid_mode(struct ufx_data *dev, struct fb_var_screeninfo *var)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	u16 h_total, h_active, h_blank_start, h_blank_end, h_sync_start, h_sync_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	u16 v_total, v_active, v_blank_start, v_blank_end, v_sync_start, v_sync_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	int status = ufx_reg_write(dev, 0x8028, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	check_warn_return(status, "ufx_set_vid_mode error disabling RGB pad");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	status = ufx_reg_write(dev, 0x8024, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	check_warn_return(status, "ufx_set_vid_mode error disabling VDAC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	/* shut everything down before changing timing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	status = ufx_blank(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	check_warn_return(status, "ufx_set_vid_mode error blanking display");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	status = ufx_disable(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	check_warn_return(status, "ufx_set_vid_mode error disabling display");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	status = ufx_config_pix_clk(dev, var->pixclock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	check_warn_return(status, "ufx_set_vid_mode error configuring pixclock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	status = ufx_reg_write(dev, 0x2000, 0x00000104);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	/* set horizontal timings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	h_active = var->xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	h_blank_start = var->xres + var->right_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	h_blank_end = var->xres + var->right_margin + var->hsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	h_sync_start = var->xres + var->right_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	h_sync_end = var->xres + var->right_margin + var->hsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	temp = ((h_total - 1) << 16) | (h_active - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	status = ufx_reg_write(dev, 0x2008, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2008");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	temp = ((h_blank_start - 1) << 16) | (h_blank_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	status = ufx_reg_write(dev, 0x200C, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x200C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	temp = ((h_sync_start - 1) << 16) | (h_sync_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	status = ufx_reg_write(dev, 0x2010, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2010");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	/* set vertical timings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	v_total = var->upper_margin + var->yres + var->lower_margin + var->vsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	v_active = var->yres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	v_blank_start = var->yres + var->lower_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	v_blank_end = var->yres + var->lower_margin + var->vsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	v_sync_start = var->yres + var->lower_margin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	v_sync_end = var->yres + var->lower_margin + var->vsync_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	temp = ((v_total - 1) << 16) | (v_active - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	status = ufx_reg_write(dev, 0x2014, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2014");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	temp = ((v_blank_start - 1) << 16) | (v_blank_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	status = ufx_reg_write(dev, 0x2018, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2018");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	temp = ((v_sync_start - 1) << 16) | (v_sync_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	status = ufx_reg_write(dev, 0x201C, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x201C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	status = ufx_reg_write(dev, 0x2020, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2020");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	status = ufx_reg_write(dev, 0x2024, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2024");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	/* Set the frame length register (#pix * 2 bytes/pixel) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	temp = var->xres * var->yres * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	temp = (temp + 7) & (~0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	status = ufx_reg_write(dev, 0x2028, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2028");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	/* enable desired output interface & disable others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	status = ufx_reg_write(dev, 0x2040, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2040");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	status = ufx_reg_write(dev, 0x2044, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2044");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	status = ufx_reg_write(dev, 0x2048, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2048");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	/* set the sync polarities & enable bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	temp = 0x00000001;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	if (var->sync & FB_SYNC_HOR_HIGH_ACT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		temp |= 0x00000010;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	if (var->sync & FB_SYNC_VERT_HIGH_ACT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		temp |= 0x00000008;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	status = ufx_reg_write(dev, 0x2040, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	check_warn_return(status, "ufx_set_vid_mode error writing 0x2040");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	/* start everything back up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	status = ufx_enable(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	check_warn_return(status, "ufx_set_vid_mode error enabling display");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/* Unblank the display */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	status = ufx_unblank(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	check_warn_return(status, "ufx_set_vid_mode error unblanking display");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	/* enable RGB pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	status = ufx_reg_write(dev, 0x8028, 0x00000003);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	check_warn_return(status, "ufx_set_vid_mode error enabling RGB pad");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/* enable VDAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	status = ufx_reg_write(dev, 0x8024, 0x00000007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	check_warn_return(status, "ufx_set_vid_mode error enabling VDAC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	unsigned long start = vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	unsigned long size = vma->vm_end - vma->vm_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	unsigned long page, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (size > info->fix.smem_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	if (offset > info->fix.smem_len - size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	pos = (unsigned long)info->fix.smem_start + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		  pos, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	while (size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		page = vmalloc_to_pfn((void *)pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		start += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		pos += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		if (size > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			size -= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	int width, int height)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	size_t packed_line_len = ALIGN((width * 2), 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	size_t packed_rect_len = packed_line_len * height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	int line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	BUG_ON(!dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	BUG_ON(!dev->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	/* command word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	*((u32 *)&cmd[0]) = cpu_to_le32(0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	/* length word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	*((u32 *)&cmd[2]) = cpu_to_le32(packed_rect_len + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	cmd[4] = cpu_to_le16(x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	cmd[5] = cpu_to_le16(y);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	cmd[6] = cpu_to_le16(width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	cmd[7] = cpu_to_le16(height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	/* frame base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	*((u32 *)&cmd[8]) = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	/* color mode and horizontal resolution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	cmd[10] = cpu_to_le16(0x4000 | dev->info->var.xres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	/* vertical resolution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	cmd[11] = cpu_to_le16(dev->info->var.yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/* packed data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	for (line = 0; line < height; line++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		const int line_offset = dev->info->fix.line_length * (y + line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		const int byte_offset = line_offset + (x * BPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		memcpy(&cmd[(24 + (packed_line_len * line)) / 2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			(char *)dev->info->fix.smem_start + byte_offset, width * BPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	int width, int height)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	size_t packed_line_len = ALIGN((width * 2), 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	int len, status, urb_lines, start_line = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	if ((width <= 0) || (height <= 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	    (x + width > dev->info->var.xres) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	    (y + height > dev->info->var.yres))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (!atomic_read(&dev->usb_active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	while (start_line < height) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		struct urb *urb = ufx_get_urb(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		if (!urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			pr_warn("ufx_handle_damage unable to get urb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		/* assume we have enough space to transfer at least one line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		BUG_ON(urb->transfer_buffer_length < (24 + (width * 2)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		/* calculate the maximum number of lines we could fit in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		urb_lines = (urb->transfer_buffer_length - 24) / packed_line_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		/* but we might not need this many */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		urb_lines = min(urb_lines, (height - start_line));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		ufx_raw_rect(dev, urb->transfer_buffer, x, (y + start_line), width, urb_lines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		len = 24 + (packed_line_len * urb_lines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		status = ufx_submit_urb(dev, urb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		check_warn_return(status, "Error submitting URB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		start_line += urb_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) /* Path triggered by usermode clients who write to filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * e.g. cat filename > /dev/fb1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  * Not used by X Windows or text-mode console. But useful for testing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * Slow because of extra copy and we must assume all pixels dirty. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			  size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	ssize_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	u32 offset = (u32) *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	result = fb_sys_write(info, buf, count, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (result > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		int start = max((int)(offset / info->fix.line_length), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		int lines = min((u32)((result / info->fix.line_length) + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 				(u32)info->var.yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		ufx_handle_damage(dev, 0, start, info->var.xres, lines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static void ufx_ops_copyarea(struct fb_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				const struct fb_copyarea *area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	sys_copyarea(info, area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	ufx_handle_damage(dev, area->dx, area->dy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			area->width, area->height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static void ufx_ops_imageblit(struct fb_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				const struct fb_image *image)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	sys_imageblit(info, image);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	ufx_handle_damage(dev, image->dx, image->dy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			image->width, image->height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static void ufx_ops_fillrect(struct fb_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			  const struct fb_fillrect *rect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	sys_fillrect(info, rect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	ufx_handle_damage(dev, rect->dx, rect->dy, rect->width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			      rect->height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) /* NOTE: fb_defio.c is holding info->fbdefio.mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  *   Touching ANY framebuffer memory that triggers a page fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  *   in fb_defio will cause a deadlock, when it also tries to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  *   grab the same mutex. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) static void ufx_dpy_deferred_io(struct fb_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				struct list_head *pagelist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	struct page *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct fb_deferred_io *fbdefio = info->fbdefio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (!fb_defio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (!atomic_read(&dev->usb_active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	/* walk the written page list and render each to device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		/* create a rectangle of full screen width that encloses the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		 * entire dirty framebuffer page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		const int x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		const int width = dev->info->var.xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		const int y = (cur->index << PAGE_SHIFT) / (width * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		int height = (PAGE_SIZE / (width * 2)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		height = min(height, (int)(dev->info->var.yres - y));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		BUG_ON(y >= dev->info->var.yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		BUG_ON((y + height) > dev->info->var.yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		ufx_handle_damage(dev, x, y, width, height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int ufx_ops_ioctl(struct fb_info *info, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			 unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	struct dloarea *area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (!atomic_read(&dev->usb_active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/* TODO: Update X server to get this from sysfs instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (cmd == UFX_IOCTL_RETURN_EDID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		u8 __user *edid = (u8 __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (copy_to_user(edid, dev->edid, dev->edid_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (cmd == UFX_IOCTL_REPORT_DAMAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		/* If we have a damage-aware client, turn fb_defio "off"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		 * To avoid perf imact of unnecessary page fault handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		 * Done by resetting the delay for this fb_info to a very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		 * long period. Pages will become writable and stay that way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		 * Reset to normal value when all clients have closed this fb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		if (info->fbdefio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			info->fbdefio->delay = UFX_DEFIO_WRITE_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		area = (struct dloarea *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		if (area->x < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			area->x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		if (area->x > info->var.xres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			area->x = info->var.xres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (area->y < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			area->y = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		if (area->y > info->var.yres)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			area->y = info->var.yres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		ufx_handle_damage(dev, area->x, area->y, area->w, area->h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* taken from vesafb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ufx_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	       unsigned blue, unsigned transp, struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (regno >= info->cmap.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	if (regno < 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (info->var.red.offset == 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			/* 1:5:5:5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			((u32 *) (info->pseudo_palette))[regno] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			    ((red & 0xf800) >> 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			    ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			/* 0:5:6:5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			((u32 *) (info->pseudo_palette))[regno] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			    ((red & 0xf800)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			    ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /* It's common for several clients to have framebuffer open simultaneously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * e.g. both fbcon and X. Makes things interesting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  * Assumes caller is holding info->lock (for open and release at least) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static int ufx_ops_open(struct fb_info *info, int user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	/* fbcon aggressively connects to first framebuffer it finds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	 * preventing other clients (X) from working properly. Usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	 * not what the user wants. Fail by default with option to enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (user == 0 && !console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/* If the USB device is gone, we don't accept new opens */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (dev->virtualized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	dev->fb_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	kref_get(&dev->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	if (fb_defio && (info->fbdefio == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		/* enable defio at last moment if not disabled by client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		struct fb_deferred_io *fbdefio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		if (fbdefio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			fbdefio->deferred_io = ufx_dpy_deferred_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		info->fbdefio = fbdefio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		fb_deferred_io_init(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		info->node, user, info, dev->fb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * Called when all client interfaces to start transactions have been disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  * and all references to our device instance (ufx_data) are released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * Every transaction must have a reference, so we know are fully spun down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void ufx_free(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	struct ufx_data *dev = container_of(kref, struct ufx_data, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	/* this function will wait for all in-flight urbs to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	if (dev->urbs.count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		ufx_free_urb_list(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	pr_debug("freeing ufx_data %p", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static void ufx_release_urb_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	struct urb_node *unode = container_of(work, struct urb_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 					      release_urb_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	up(&unode->dev->urbs.limit_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static void ufx_free_framebuffer_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	struct ufx_data *dev = container_of(work, struct ufx_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 					    free_framebuffer_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct fb_info *info = dev->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	int node = info->node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	unregister_framebuffer(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (info->cmap.len != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		fb_dealloc_cmap(&info->cmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	if (info->monspecs.modedb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		fb_destroy_modedb(info->monspecs.modedb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	vfree(info->screen_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	fb_destroy_modelist(&info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	dev->info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	/* Assume info structure is freed after this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	framebuffer_release(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	pr_debug("fb_info for /dev/fb%d has been freed", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	/* ref taken in probe() as part of registering framebfufer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	kref_put(&dev->kref, ufx_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  * Assumes caller is holding info->lock mutex (for open and release at least)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static int ufx_ops_release(struct fb_info *info, int user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	dev->fb_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	/* We can't free fb_info here - fbmem will touch it when we return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (dev->virtualized && (dev->fb_count == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		schedule_delayed_work(&dev->free_framebuffer_work, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if ((dev->fb_count == 0) && (info->fbdefio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		fb_deferred_io_cleanup(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		kfree(info->fbdefio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		info->fbdefio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	pr_debug("released /dev/fb%d user=%d count=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		  info->node, user, dev->fb_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	kref_put(&dev->kref, ufx_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Check whether a video mode is supported by the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * We start from monitor's modes, so don't need to filter that here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int ufx_is_valid_mode(struct fb_videomode *mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if ((mode->xres * mode->yres) > (2048 * 1152)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		pr_debug("%dx%d too many pixels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		       mode->xres, mode->yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	if (mode->pixclock < 5000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		pr_debug("%dx%d %dps pixel clock too fast",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		       mode->xres, mode->yres, mode->pixclock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	pr_debug("%dx%d (pixclk %dps %dMHz) valid mode", mode->xres, mode->yres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		mode->pixclock, (1000000 / mode->pixclock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void ufx_var_color_format(struct fb_var_screeninfo *var)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	const struct fb_bitfield red = { 11, 5, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	const struct fb_bitfield green = { 5, 6, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	const struct fb_bitfield blue = { 0, 5, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	var->bits_per_pixel = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	var->red = red;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	var->green = green;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	var->blue = blue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static int ufx_ops_check_var(struct fb_var_screeninfo *var,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	struct fb_videomode mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	/* TODO: support dynamically changing framebuffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	if ((var->xres * var->yres * 2) > info->fix.smem_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	/* set device-specific elements of var unrelated to mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	ufx_var_color_format(var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	fb_var_to_videomode(&mode, var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (!ufx_is_valid_mode(&mode, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int ufx_ops_set_par(struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	u16 *pix_framebuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	pr_debug("set_par mode %dx%d", info->var.xres, info->var.yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	result = ufx_set_vid_mode(dev, &info->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if ((result == 0) && (dev->fb_count == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		/* paint greenscreen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		pix_framebuffer = (u16 *) info->screen_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		for (i = 0; i < info->fix.smem_len / 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			pix_framebuffer[i] = 0x37e6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		ufx_handle_damage(dev, 0, 0, info->var.xres, info->var.yres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	/* re-enable defio if previously disabled by damage tracking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if (info->fbdefio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		info->fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* In order to come back from full DPMS off, we need to set the mode again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static int ufx_ops_blank(int blank_mode, struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct ufx_data *dev = info->par;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	ufx_set_vid_mode(dev, &info->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static const struct fb_ops ufx_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	.fb_read = fb_sys_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	.fb_write = ufx_ops_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	.fb_setcolreg = ufx_ops_setcolreg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	.fb_fillrect = ufx_ops_fillrect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	.fb_copyarea = ufx_ops_copyarea,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	.fb_imageblit = ufx_ops_imageblit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	.fb_mmap = ufx_ops_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	.fb_ioctl = ufx_ops_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	.fb_open = ufx_ops_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	.fb_release = ufx_ops_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	.fb_blank = ufx_ops_blank,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	.fb_check_var = ufx_ops_check_var,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	.fb_set_par = ufx_ops_set_par,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* Assumes &info->lock held by caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  * Assumes no active clients have framebuffer open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static int ufx_realloc_framebuffer(struct ufx_data *dev, struct fb_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	int old_len = info->fix.smem_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	int new_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	unsigned char *old_fb = info->screen_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	unsigned char *new_fb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	pr_debug("Reallocating framebuffer. Addresses will change!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	new_len = info->fix.line_length * info->var.yres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (PAGE_ALIGN(new_len) > old_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		 * Alloc system memory for virtual framebuffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		new_fb = vmalloc(new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		if (!new_fb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		if (info->screen_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			memcpy(new_fb, old_fb, old_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			vfree(info->screen_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		info->screen_base = new_fb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		info->fix.smem_len = PAGE_ALIGN(new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		info->fix.smem_start = (unsigned long) new_fb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		info->flags = smscufx_info_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* sets up I2C Controller for 100 Kbps, std. speed, 7-bit addr, master,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * restart enabled, but no start byte, enable controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static int ufx_i2c_init(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	/* disable the controller before it can be reprogrammed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	int status = ufx_reg_write(dev, 0x106C, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	check_warn_return(status, "failed to disable I2C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	/* Setup the clock count registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 * (12+1) = 13 clks @ 2.5 MHz = 5.2 uS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	status = ufx_reg_write(dev, 0x1018, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	check_warn_return(status, "error writing 0x1018");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	/* (6+8) = 14 clks @ 2.5 MHz = 5.6 uS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	status = ufx_reg_write(dev, 0x1014, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	check_warn_return(status, "error writing 0x1014");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	status = ufx_reg_read(dev, 0x1000, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	check_warn_return(status, "error reading 0x1000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	/* set speed to std mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	tmp &= ~(0x06);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	tmp |= 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	/* 7-bit (not 10-bit) addressing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	tmp &= ~(0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	/* enable restart conditions and master mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	tmp |= 0x21;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	status = ufx_reg_write(dev, 0x1000, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	check_warn_return(status, "error writing 0x1000");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	/* Set normal tx using target address 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0xC00, 0x000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	check_warn_return(status, "error setting TX mode bits in 0x1004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* Enable the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	status = ufx_reg_write(dev, 0x106C, 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	check_warn_return(status, "failed to enable I2C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* sets the I2C port mux and target address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static int ufx_i2c_configure(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	int status = ufx_reg_write(dev, 0x106C, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	check_warn_return(status, "failed to disable I2C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	status = ufx_reg_write(dev, 0x3010, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	check_warn_return(status, "failed to write 0x3010");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	/* A0h is std for any EDID, right shifted by one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	status = ufx_reg_clear_and_set_bits(dev, 0x1004, 0x3FF,	(0xA0 >> 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	check_warn_return(status, "failed to set TAR bits in 0x1004");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	status = ufx_reg_write(dev, 0x106C, 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	check_warn_return(status, "failed to enable I2C");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /* wait for BUSY to clear, with a timeout of 50ms with 10ms sleeps. if no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  * monitor is connected, there is no error except for timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static int ufx_i2c_wait_busy(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	int i, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	for (i = 0; i < 15; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		status = ufx_reg_read(dev, 0x1100, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		check_warn_return(status, "0x1100 read failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		/* if BUSY is clear, check for error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		if ((tmp & 0x80000000) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			if (tmp & 0x20000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				pr_warn("I2C read failed, 0x1100=0x%08x", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		/* perform the first 10 retries without delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		if (i >= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	pr_warn("I2C access timed out, resetting I2C hardware");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	status =  ufx_reg_write(dev, 0x1100, 0x40000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	check_warn_return(status, "0x1100 write failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* reads a 128-byte EDID block from the currently selected port and TAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static int ufx_read_edid(struct ufx_data *dev, u8 *edid, int edid_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	int i, j, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	u32 *edid_u32 = (u32 *)edid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	BUG_ON(edid_len != EDID_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	status = ufx_i2c_configure(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		pr_err("ufx_i2c_configure failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	memset(edid, 0xff, EDID_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	/* Read the 128-byte EDID as 2 bursts of 64 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		u32 temp = 0x28070000 | (63 << 20) | (((u32)(i * 64)) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		status = ufx_reg_write(dev, 0x1100, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		check_warn_return(status, "Failed to write 0x1100");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		temp |= 0x80000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		status = ufx_reg_write(dev, 0x1100, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		check_warn_return(status, "Failed to write 0x1100");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		status = ufx_i2c_wait_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		check_warn_return(status, "Timeout waiting for I2C BUSY to clear");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		for (j = 0; j < 16; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			u32 data_reg_addr = 0x1110 + (j * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			status = ufx_reg_read(dev, data_reg_addr, edid_u32++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			check_warn_return(status, "Error reading i2c data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	/* all FF's in the first 16 bytes indicates nothing is connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		if (edid[i] != 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			pr_debug("edid data read successfully");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			return EDID_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	pr_warn("edid data contains all 0xff");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* 1) use sw default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * 2) Parse into various fb_info structs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * 3) Allocate virtual framebuffer memory to back highest res mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  * Parses EDID into three places used by various parts of fbdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  * fb_var_screeninfo contains the timing of the monitor's preferred mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * fb_info.monspecs is full parsed EDID info, including monspecs.modedb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  * fb_info.modelist is a linked list of all monitor & VESA modes which work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  * If EDID is not readable/valid, then modelist is all VESA modes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  * Returns 0 if successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static int ufx_setup_modes(struct ufx_data *dev, struct fb_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	char *default_edid, size_t default_edid_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	const struct fb_videomode *default_vmode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	u8 *edid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	int i, result = 0, tries = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	if (info->dev) /* only use mutex if info has been registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		mutex_lock(&info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	if (!edid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		result = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	fb_destroy_modelist(&info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	memset(&info->monspecs, 0, sizeof(info->monspecs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	/* Try to (re)read EDID from hardware first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	 * EDID data may return, but not parse as valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	 * Try again a few times, in case of e.g. analog cable noise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	while (tries--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		i = ufx_read_edid(dev, edid, EDID_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		if (i >= EDID_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			fb_edid_to_monspecs(edid, &info->monspecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		if (info->monspecs.modedb_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			dev->edid = edid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			dev->edid_size = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	/* If that fails, use a previously returned EDID if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	if (info->monspecs.modedb_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		pr_err("Unable to get valid EDID from device/display\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		if (dev->edid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			fb_edid_to_monspecs(dev->edid, &info->monspecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			if (info->monspecs.modedb_len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 				pr_err("Using previously queried EDID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	/* If that fails, use the default EDID we were handed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	if (info->monspecs.modedb_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		if (default_edid_size >= EDID_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			fb_edid_to_monspecs(default_edid, &info->monspecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			if (info->monspecs.modedb_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 				memcpy(edid, default_edid, default_edid_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 				dev->edid = edid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 				dev->edid_size = default_edid_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 				pr_err("Using default/backup EDID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	/* If we've got modes, let's pick a best default mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	if (info->monspecs.modedb_len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		for (i = 0; i < info->monspecs.modedb_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			if (ufx_is_valid_mode(&info->monspecs.modedb[i], info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 				fb_add_videomode(&info->monspecs.modedb[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 					&info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 			else /* if we've removed top/best mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 				info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		default_vmode = fb_find_best_display(&info->monspecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 						     &info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	/* If everything else has failed, fall back to safe default mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	if (default_vmode == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		struct fb_videomode fb_vmode = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		/* Add the standard VESA modes to our modelist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		 * Since we don't have EDID, there may be modes that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		 * overspec monitor and/or are incorrect aspect ratio, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		 * But at least the user has a chance to choose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		for (i = 0; i < VESA_MODEDB_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 			if (ufx_is_valid_mode((struct fb_videomode *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 						&vesa_modes[i], info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 				fb_add_videomode(&vesa_modes[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 						 &info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		/* default to resolution safe for projectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		 * (since they are most common case without EDID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		fb_vmode.xres = 800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		fb_vmode.yres = 600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		fb_vmode.refresh = 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		default_vmode = fb_find_nearest_mode(&fb_vmode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 						     &info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	/* If we have good mode and no active clients */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if ((default_vmode != NULL) && (dev->fb_count == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		fb_videomode_to_var(&info->var, default_vmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		ufx_var_color_format(&info->var);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		/* with mode size info, we can now alloc our framebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		memcpy(&info->fix, &ufx_fix, sizeof(ufx_fix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		info->fix.line_length = info->var.xres *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			(info->var.bits_per_pixel / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		result = ufx_realloc_framebuffer(dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		result = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	if (edid && (dev->edid != edid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		kfree(edid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (info->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		mutex_unlock(&info->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static int ufx_usb_probe(struct usb_interface *interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 			const struct usb_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	struct usb_device *usbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	struct ufx_data *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	struct fb_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	u32 id_rev, fpga_rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	/* usb initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	usbdev = interface_to_usbdev(interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	BUG_ON(!usbdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	if (dev == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		dev_err(&usbdev->dev, "ufx_usb_probe: failed alloc of dev struct\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	/* we need to wait for both usb and fbdev to spin down on disconnect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	dev->udev = usbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	dev->gdev = &usbdev->dev; /* our generic struct device * */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	usb_set_intfdata(interface, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	dev_dbg(dev->gdev, "%s %s - serial #%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		usbdev->manufacturer, usbdev->product, usbdev->serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		le16_to_cpu(usbdev->descriptor.idVendor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		le16_to_cpu(usbdev->descriptor.idProduct),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		le16_to_cpu(usbdev->descriptor.bcdDevice), dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	dev_dbg(dev->gdev, "console enable=%d\n", console);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		dev_err(dev->gdev, "ufx_alloc_urb_list failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		goto e_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	/* We don't register a new USB class. Our client interface is fbdev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	/* allocates framebuffer driver structure, not framebuffer memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	info = framebuffer_alloc(0, &usbdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		goto e_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	dev->info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	info->par = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	info->pseudo_palette = dev->pseudo_palette;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	info->fbops = &ufx_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	INIT_LIST_HEAD(&info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	retval = fb_alloc_cmap(&info->cmap, 256, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		dev_err(dev->gdev, "fb_alloc_cmap failed %x\n", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		goto destroy_modedb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	INIT_DELAYED_WORK(&dev->free_framebuffer_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			  ufx_free_framebuffer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	retval = ufx_reg_read(dev, 0x3000, &id_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	retval = ufx_reg_read(dev, 0x3004, &fpga_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	check_warn_goto_error(retval, "error %d reading 0x3004 register from device", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	dev_dbg(dev->gdev, "FPGA_REV register value 0x%08x", fpga_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	dev_dbg(dev->gdev, "resetting device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	retval = ufx_lite_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	check_warn_goto_error(retval, "error %d resetting device", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	dev_dbg(dev->gdev, "configuring system clock");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	retval = ufx_config_sys_clk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	check_warn_goto_error(retval, "error %d configuring system clock", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	dev_dbg(dev->gdev, "configuring DDR2 controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	retval = ufx_config_ddr2(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	check_warn_goto_error(retval, "error %d initialising DDR2 controller", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	dev_dbg(dev->gdev, "configuring I2C controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	retval = ufx_i2c_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	check_warn_goto_error(retval, "error %d initialising I2C controller", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	dev_dbg(dev->gdev, "selecting display mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	retval = ufx_setup_modes(dev, info, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	check_warn_goto_error(retval, "unable to find common mode for display and adapter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	check_warn_goto_error(retval, "error %d enabling graphics engine", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	/* ready to begin using device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	atomic_set(&dev->usb_active, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	dev_dbg(dev->gdev, "checking var");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	retval = ufx_ops_check_var(&info->var, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	dev_dbg(dev->gdev, "setting par");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	retval = ufx_ops_set_par(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	dev_dbg(dev->gdev, "registering framebuffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	retval = register_framebuffer(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	check_warn_goto_error(retval, "error %d register_framebuffer", retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		" Using %dK framebuffer memory\n", info->node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		info->var.xres, info->var.yres, info->fix.smem_len >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	fb_dealloc_cmap(&info->cmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) destroy_modedb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	fb_destroy_modedb(info->monspecs.modedb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	vfree(info->screen_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	fb_destroy_modelist(&info->modelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	framebuffer_release(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) put_ref:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	kref_put(&dev->kref, ufx_free); /* ref for framebuffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	kref_put(&dev->kref, ufx_free); /* last ref from kref_init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) e_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	retval = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	goto put_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static void ufx_usb_disconnect(struct usb_interface *interface)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	struct ufx_data *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	dev = usb_get_intfdata(interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	pr_debug("USB disconnect starting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	/* we virtualize until all fb clients release. Then we free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	dev->virtualized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	/* When non-active we'll update virtual framebuffer, but no new urbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	atomic_set(&dev->usb_active, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	usb_set_intfdata(interface, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	/* if clients still have us open, will be freed on last close */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	if (dev->fb_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		schedule_delayed_work(&dev->free_framebuffer_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	/* release reference taken by kref_init in probe() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	kref_put(&dev->kref, ufx_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	/* consider ufx_data freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static struct usb_driver ufx_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	.name = "smscufx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	.probe = ufx_usb_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	.disconnect = ufx_usb_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	.id_table = id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) module_usb_driver(ufx_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) static void ufx_urb_completion(struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	struct urb_node *unode = urb->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	struct ufx_data *dev = unode->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	/* sync/async unlink faults aren't errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	if (urb->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		if (!(urb->status == -ENOENT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		    urb->status == -ECONNRESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		    urb->status == -ESHUTDOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			pr_err("%s - nonzero write bulk status received: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 				__func__, urb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			atomic_set(&dev->lost_pixels, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	spin_lock_irqsave(&dev->urbs.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	list_add_tail(&unode->entry, &dev->urbs.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	dev->urbs.available++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	spin_unlock_irqrestore(&dev->urbs.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	/* When using fb_defio, we deadlock if up() is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	 * while another is waiting. So queue to another process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (fb_defio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		schedule_delayed_work(&unode->release_urb_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		up(&dev->urbs.limit_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static void ufx_free_urb_list(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	int count = dev->urbs.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	struct list_head *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	struct urb_node *unode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	pr_debug("Waiting for completes and freeing all render urbs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	/* keep waiting and freeing, until we've got 'em all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	while (count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		/* Getting interrupted means a leak, but ok at shutdown*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		ret = down_interruptible(&dev->urbs.limit_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		spin_lock_irqsave(&dev->urbs.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		node = dev->urbs.list.next; /* have reserved one with sem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		list_del_init(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		spin_unlock_irqrestore(&dev->urbs.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		unode = list_entry(node, struct urb_node, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		urb = unode->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		/* Free each separately allocated piece */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		usb_free_coherent(urb->dev, dev->urbs.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 				  urb->transfer_buffer, urb->transfer_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		kfree(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) static int ufx_alloc_urb_list(struct ufx_data *dev, int count, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	struct urb_node *unode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	spin_lock_init(&dev->urbs.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	dev->urbs.size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	INIT_LIST_HEAD(&dev->urbs.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	while (i < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		unode = kzalloc(sizeof(*unode), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		if (!unode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		unode->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		INIT_DELAYED_WORK(&unode->release_urb_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 			  ufx_release_urb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		urb = usb_alloc_urb(0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		if (!urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			kfree(unode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		unode->urb = urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 					 &urb->transfer_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			kfree(unode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			usb_free_urb(urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		/* urb->transfer_buffer_length set to actual before submit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 			buf, size, ufx_urb_completion, unode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		list_add_tail(&unode->entry, &dev->urbs.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	sema_init(&dev->urbs.limit_sem, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	dev->urbs.count = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	dev->urbs.available = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	pr_debug("allocated %d %d byte urbs\n", i, (int) size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static struct urb *ufx_get_urb(struct ufx_data *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	struct urb_node *unode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	struct urb *urb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	/* Wait for an in-flight buffer to complete and get re-queued */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		atomic_set(&dev->lost_pixels, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		pr_warn("wait for urb interrupted: %x available: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		       ret, dev->urbs.available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	spin_lock_irqsave(&dev->urbs.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	entry = dev->urbs.list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	list_del_init(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	dev->urbs.available--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	spin_unlock_irqrestore(&dev->urbs.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	unode = list_entry(entry, struct urb_node, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	urb = unode->urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	return urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static int ufx_submit_urb(struct ufx_data *dev, struct urb *urb, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	BUG_ON(len > dev->urbs.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	urb->transfer_buffer_length = len; /* set to actual payload len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	ret = usb_submit_urb(urb, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		ufx_urb_completion(urb); /* because no one else will */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		atomic_set(&dev->lost_pixels, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		pr_err("usb_submit_urb error %x\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) MODULE_PARM_DESC(console, "Allow fbcon to be used on this display");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) MODULE_DESCRIPTION("SMSC UFX kernel framebuffer driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) MODULE_LICENSE("GPL");