Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */

#undef PARPORT_DEBUG_SHARING		/* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/device.h>

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime =  DEFAULT_SPIN_TIME;

static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

static LIST_HEAD(drivers);

static DEFINE_MUTEX(registration_lock);

/* What you can do to a port that's gone away.. */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};

static struct device_type parport_device_type = {
	.name = "parport",
};

static int is_parport(struct device *dev)
{
	return dev->type == &parport_device_type;
}

static int parport_probe(struct device *dev)
{
	struct parport_driver *drv;

	if (is_parport(dev))
		return -ENODEV;

	drv = to_parport_driver(dev->driver);
	if (!drv->probe) {
		/* if driver has not defined a custom probe */
		struct pardevice *par_dev = to_pardevice(dev);

		if (strcmp(par_dev->name, drv->name))
			return -ENODEV;
		return 0;
	}
	/* if driver defined its own probe */
	return drv->probe(to_pardevice(dev));
}

static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};

int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}

void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}

/*
 * iterates through all the drivers registered with the bus and sends the port
 * details to the match_port callback of the driver, so that the driver can
 * know about the new port that just registered with the bus and decide if it
 * wants to use this new port.
 */
static int driver_check(struct device_driver *dev_drv, void *_port)
{
	struct parport *port = _port;
	struct parport_driver *drv = to_parport_driver(dev_drv);

	if (drv->match_port)
		drv->match_port(port);
	return 0;
}

/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * call the driver_check function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}

static int driver_detach(struct device_driver *_drv, void *_port)
{
	struct parport *port = _port;
	struct parport_driver *drv = to_parport_driver(_drv);

	if (drv->detach)
		drv->detach(port);
	return 0;
}

/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;
	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * call the detach function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}

/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}

/*
 * iterates through all the devices connected to the bus and sends the device
 * details to the match_port callback of the driver, so that the driver can
 * know what are all the ports that are connected to the bus and choose the
 * port to which it wants to register its device.
 */
static int port_check(struct device *dev, void *dev_drv)
{
	struct parport_driver *drv = dev_drv;

	/* only send ports, do not send other devices connected to bus */
	if (is_parport(dev))
		drv->match_port(to_parport_dev(dev));
	return 0;
}

/*
 * Iterates through all the devices connected to the bus and return 1
 * if the device is a parallel port.
 */

static int port_detect(struct device *dev, void *dev_drv)
{
	if (is_parport(dev))
		return 1;
	return 0;
}

/**
 *	parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *	@owner: owner module of drv
 *	@mod_name: module name string
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	If devmodel is true then the new device model is used
 *	for registration.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	If using the non device model:
 *	The driver's attach() function may block.  The port that
 *	attach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.  Calling
 *	parport_register_device() on that port will do this for you.
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *
 *	Returns 0 on success.  The non device model will always succeed,
 *	but the new device model can fail and will return the error code.
 **/

int __parport_register_driver(struct parport_driver *drv, struct module *owner,
			      const char *mod_name)
{
	/* using device model */
	int ret;

	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &parport_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	ret = driver_register(&drv->driver);
	if (ret)
		return ret;

	/*
	 * check if bus has any parallel port registered, if
	 * none is found then load the lowlevel driver.
	 */
	ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
			       port_detect);
	if (!ret)
		get_lowlevel_driver();

	mutex_lock(&registration_lock);
	if (drv->match_port)
		bus_for_each_dev(&parport_bus_type, NULL, drv,
				 port_check);
	mutex_unlock(&registration_lock);

	return 0;
}
EXPORT_SYMBOL(__parport_register_driver);

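/*
 * Illustrative sketch, not part of this file: a minimal device-model
 * parallel-port device driver built on parport_register_driver() and
 * parport_register_dev_model().  The "example" names below are
 * hypothetical placeholders; only the parport_* and pr_* calls are real
 * kernel APIs documented in this file.
 *
 *	static struct pardevice *example_pardev;
 *
 *	static void example_match_port(struct parport *port)
 *	{
 *		struct pardev_cb cb = {
 *			.preempt  = NULL,	// no preemption callback
 *			.wakeup   = NULL,	// no wake-up callback
 *			.private  = NULL,
 *			.irq_func = NULL,
 *			.flags    = 0,		// PARPORT_DEV_EXCL only if sharing is impossible
 *		};
 *
 *		example_pardev = parport_register_dev_model(port, "example",
 *							    &cb, 0);
 *		if (!example_pardev)
 *			pr_err("example: failed to register on %s\n", port->name);
 *	}
 *
 *	static void example_detach(struct parport *port)
 *	{
 *		if (example_pardev && example_pardev->port == port) {
 *			parport_unregister_device(example_pardev);
 *			example_pardev = NULL;
 *		}
 *	}
 *
 *	static struct parport_driver example_driver = {
 *		.name       = "example",
 *		.match_port = example_match_port,
 *		.detach     = example_detach,
 *		// trees that still carry the legacy model may also need
 *		// .devmodel = true here
 *	};
 *
 * Module init/exit would then call parport_register_driver(&example_driver)
 * and parport_unregister_driver(&example_driver) respectively.
 */
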
static int port_detach(struct device *dev, void *_drv)
{
	struct parport_driver *drv = _drv;

	if (is_parport(dev) && drv->detach)
		drv->detach(to_parport_dev(dev));

	return 0;
}

/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/

void parport_unregister_driver(struct parport_driver *drv)
{
	mutex_lock(&registration_lock);
	bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
	driver_unregister(&drv->driver);
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);

static void free_port(struct device *dev)
{
	int d;
	struct parport *port = to_parport_dev(dev);

	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port->name);
	kfree(port);
}

/**
 *	parport_get_port - increment a port's reference count
 *	@port: the port
 *
 *	This ensures that a struct parport pointer remains valid
 *	until the matching parport_put_port() call.
 **/

struct parport *parport_get_port(struct parport *port)
{
	struct device *dev = get_device(&port->bus_dev);

	return to_parport_dev(dev);
}
EXPORT_SYMBOL(parport_get_port);

void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);

/**
 *	parport_put_port - decrement a port's reference count
 *	@port: the port
 *
 *	This should be called once for each call to parport_get_port(),
 *	once the port is no longer needed. When the reference count reaches
 *	zero (port is no longer used), free_port is called.
 **/

void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);

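/*
 * Illustrative sketch, an assumption rather than text from this file: a
 * driver that caches a struct parport pointer beyond the attach() or
 * match_port() callback must hold its own reference:
 *
 *	saved_port = parport_get_port(port);	// e.g. inside match_port()
 *	...
 *	parport_put_port(saved_port);		// e.g. inside detach()
 *	saved_port = NULL;
 *
 * parport_register_device()/parport_register_dev_model() take such a
 * reference on the caller's behalf; parport_unregister_device() drops it
 * again.
 */
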
/**
 *	parport_register_port - register a parallel port
 *	@base: base I/O address
 *	@irq: IRQ line
 *	@dma: DMA channel
 *	@ops: pointer to the port driver's port operations structure
 *
 *	When a parallel port (lowlevel) driver finds a port that
 *	should be made available to parallel port device drivers, it
 *	should call parport_register_port().  The @base, @irq, and
 *	@dma parameters are for the convenience of port drivers, and
 *	for ports where they aren't meaningful needn't be set to
 *	anything special.  They can be altered afterwards by adjusting
 *	the relevant members of the parport structure that is returned
 *	and represents the port.  They should not be tampered with
 *	after calling parport_announce_port, however.
 *
 *	If there are parallel port device drivers in the system that
 *	have registered themselves using parport_register_driver(),
 *	they are not told about the port at this time; that is done by
 *	parport_announce_port().
 *
 *	The @ops structure is allocated by the caller, and must not be
 *	deallocated before calling parport_remove_port().
 *
 *	If there is no memory to allocate a new parport structure,
 *	this function will return %NULL.
 **/

struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops)
{
	struct list_head *l;
	struct parport *tmp;
	int num;
	int device;
	char *name;
	int ret;

	tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
	if (!tmp)
		return NULL;

	/* Init our structure */
	tmp->base = base;
	tmp->irq = irq;
	tmp->dma = dma;
	tmp->muxport = tmp->daisy = tmp->muxsel = -1;
	tmp->modes = 0;
	INIT_LIST_HEAD(&tmp->list);
	tmp->devices = tmp->cad = NULL;
	tmp->flags = 0;
	tmp->ops = ops;
	tmp->physport = tmp;
	memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
	rwlock_init(&tmp->cad_lock);
	spin_lock_init(&tmp->waitlist_lock);
	spin_lock_init(&tmp->pardevice_lock);
	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	sema_init(&tmp->ieee1284.irq, 0);
	tmp->spintime = parport_default_spintime;
	atomic_set(&tmp->ref_count, 1);
	INIT_LIST_HEAD(&tmp->full_list);

	name = kmalloc(15, GFP_KERNEL);
	if (!name) {
		kfree(tmp);
		return NULL;
	}
	/* Search for the lowest free parport number. */

	spin_lock(&full_list_lock);
	for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
		struct parport *p = list_entry(l, struct parport, full_list);
		if (p->number != num)
			break;
	}
	tmp->portnum = tmp->number = num;
	list_add_tail(&tmp->full_list, l);
	spin_unlock(&full_list_lock);

	/*
	 * Now that the portnum is known finish doing the Init.
	 */
	sprintf(name, "parport%d", tmp->portnum = tmp->number);
	tmp->name = name;
	tmp->bus_dev.bus = &parport_bus_type;
	tmp->bus_dev.release = free_port;
	dev_set_name(&tmp->bus_dev, name);
	tmp->bus_dev.type = &parport_device_type;

	for (device = 0; device < 5; device++)
		/* assume the worst */
		tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

	tmp->waithead = tmp->waittail = NULL;

	ret = device_register(&tmp->bus_dev);
	if (ret) {
		put_device(&tmp->bus_dev);
		return NULL;
	}

	return tmp;
}
EXPORT_SYMBOL(parport_register_port);

/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/

void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		pr_warn("%s: fix this legacy no-device port driver!\n",
			port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);

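/*
 * Illustrative sketch, not from this file: the usual sequence in a
 * lowlevel port driver.  The base address, modes, "example_ops" and
 * "parent_device" are hypothetical placeholders.
 *
 *	static struct parport_operations example_ops = {
 *		// filled in with the hardware access callbacks
 *	};
 *
 *	struct parport *p;
 *
 *	p = parport_register_port(0x378, PARPORT_IRQ_NONE,
 *				  PARPORT_DMA_NONE, &example_ops);
 *	if (!p)
 *		return -ENOMEM;
 *	p->modes = PARPORT_MODE_PCSPP;	// adjust fields before announcing
 *	p->dev = parent_device;		// avoids the "legacy no-device" warning
 *	parport_announce_port(p);	// attach()/match_port() callbacks run now
 */
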
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/

void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);

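/*
 * Illustrative sketch, not from this file: teardown in a lowlevel port
 * driver mirrors the registration sequence above.
 *
 *	parport_remove_port(p);	// detach() callbacks run, ops become dead_ops
 *	parport_del_port(p);	// unregister from the bus; the last reference frees the port
 */
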
static void free_pardevice(struct device *dev)
{
	struct pardevice *par_dev = to_pardevice(dev);

	kfree(par_dev->name);
	kfree(par_dev);
}

/**
 *	parport_register_dev_model - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@par_dev_cb: struct containing callbacks
 *	@id: device number to be given to the device
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The struct pardev_cb contains pointers to the callbacks.  The
 *	preemption callback, @preempt, is called when this device driver
 *	has claimed access to the port but another device driver wants
 *	to use it.  It is given @private as its parameter, and should
 *	return zero if it is willing for the system to release the port
 *	to another driver on its behalf. If it wants to keep control of
 *	the port it should return non-zero, and no action will be taken.
 *	It is good manners for the driver to try to release the port at
 *	the earliest opportunity after its preemption callback rejects a
 *	preemption attempt. Note that if a preemption callback is happy
 *	for preemption to go ahead, there is no need to release the
 *	port; it is done automatically. This function may not block, as
 *	it may be called from interrupt context. If the device driver
 *	does not support preemption, @preempt can be %NULL.
 *
 *	The wake-up ("kick") callback function, @wakeup, is called when
 *	the port is available to be claimed for exclusive access; that
 *	is, parport_claim() is guaranteed to succeed when called from
 *	inside the wake-up callback function.  If the driver wants to
 *	claim the port it should do so; otherwise, it need not take
 *	any action.  This function may not block, as it may be called
 *	from interrupt context.  If the device driver does not want to
 *	be explicitly invited to claim the port in this way, @wakeup can
 *	be %NULL.
 *
 *	The interrupt handler, @irq_func, is called when an interrupt
 *	arrives from the parallel port.  Note that if a device driver
 *	wants to use interrupts it should use parport_enable_irq(),
 *	and can also check the irq member of the parport structure
 *	representing the port.
 *
 *	The parallel port (lowlevel) driver is the one that has called
 *	request_irq() and whose interrupt handler is called first.
 *	This handler does whatever needs to be done to the hardware to
 *	acknowledge the interrupt (for PC-style ports there is nothing
 *	special to be done).  It then tells the IEEE 1284 code about
 *	the interrupt, which may involve reacting to an IEEE 1284
 *	event depending on the current IEEE 1284 phase.  After this,
 *	it calls @irq_func.  Needless to say, @irq_func will be called
 *	from interrupt context, and may not block.
 *
 *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 *	so should only be used when sharing the port with other device
 *	drivers is impossible and would lead to incorrect behaviour.
 *	Use it sparingly!  Normally, @flags will be zero.
 *
 *	This function returns a pointer to a structure that represents
 *	the device on the port, or %NULL if there is not enough memory
 *	to allocate space for that structure.
 **/

struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * This function must not run from an irq handler so we don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	 * to clear irq on the local CPU. -arca
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	spin_lock(&port->physport->pardevice_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		if (port->physport->devices) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			spin_unlock(&port->physport->pardevice_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			pr_debug("%s: cannot grant exclusive access for device %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				 port->name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			kfree(par_dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			device_unregister(&par_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			goto err_put_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		port->flags |= PARPORT_FLAG_EXCL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	par_dev->next = port->physport->devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	wmb();	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		 * Make sure that par_dev->next is written before it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		 * added to the list; see comments marked 'no locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		 * required'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (port->physport->devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		port->physport->devices->prev = par_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	port->physport->devices = par_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	spin_unlock(&port->physport->pardevice_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	init_waitqueue_head(&par_dev->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	par_dev->timeslice = parport_default_timeslice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	par_dev->waitnext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	par_dev->waitprev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	 * This has to be run as last thing since init_state may need other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	 * pardevice fields. -arca
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	port->ops->init_state(par_dev, par_dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		port->proc_device = par_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		parport_device_proc_register(par_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	return par_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) err_free_devname:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	kfree(devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) err_free_par_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	kfree(par_dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) err_put_par_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (!par_dev->devmodel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		kfree(par_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) err_put_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	parport_put_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	module_put(port->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) EXPORT_SYMBOL(parport_register_dev_model);
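/*
 * Illustrative sketch, not part of this file: a client driver typically
 * fills in a struct pardev_cb (preempt may return non-zero to refuse
 * giving up the port, wakeup is called when the port becomes free) and
 * registers itself on a port it has located.  The names "foo",
 * "foo_preempt", "foo_wakeup" and "foo_data" below are hypothetical.
 *
 *	static void foo_bind(struct parport *port)
 *	{
 *		struct pardev_cb cb = {
 *			.preempt = foo_preempt,
 *			.wakeup  = foo_wakeup,
 *			.private = &foo_data,
 *		};
 *		struct pardevice *pdev;
 *
 *		pdev = parport_register_dev_model(port, "foo", &cb, 0);
 *		if (!pdev)
 *			pr_err("foo: registration on %s failed\n", port->name);
 *	}
 */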
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  *	parport_unregister_device - deregister a device on a parallel port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  *	@dev: pointer to structure representing device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *	This undoes the effect of parport_register_device().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) void parport_unregister_device(struct pardevice *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct parport *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) #ifdef PARPORT_PARANOID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		pr_err("%s: passed NULL\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	port = dev->port->physport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (port->proc_device == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		port->proc_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		parport_device_proc_unregister(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (port->cad == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		printk(KERN_DEBUG "%s: %s forgot to release port\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		       port->name, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		parport_release(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	spin_lock(&port->pardevice_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (dev->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		dev->next->prev = dev->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (dev->prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		dev->prev->next = dev->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		port->devices = dev->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (dev->flags & PARPORT_DEV_EXCL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		port->flags &= ~PARPORT_FLAG_EXCL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	spin_unlock(&port->pardevice_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	 * Make sure we haven't left any pointers around in the wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	spin_lock_irq(&port->waitlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		if (dev->waitprev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			dev->waitprev->waitnext = dev->waitnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			port->waithead = dev->waitnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		if (dev->waitnext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			dev->waitnext->waitprev = dev->waitprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			port->waittail = dev->waitprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	spin_unlock_irq(&port->waitlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	kfree(dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	device_unregister(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	module_put(port->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	parport_put_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) EXPORT_SYMBOL(parport_unregister_device);
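/*
 * Illustrative sketch, not part of this file: a client driver that holds
 * a long-term claim on the port releases it and then drops its
 * registration on teardown.  "foo_pdev" is a hypothetical pardevice
 * obtained from parport_register_dev_model().
 *
 *	static void foo_unbind(void)
 *	{
 *		parport_release(foo_pdev);
 *		parport_unregister_device(foo_pdev);
 *		foo_pdev = NULL;
 *	}
 */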
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  *	parport_find_number - find a parallel port by number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  *	@number: parallel port number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  *	This returns the parallel port with the specified number, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  *	%NULL if there is none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  *	There is an implicit parport_get_port() done already; to throw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  *	away the reference to the port that parport_find_number()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  *	gives you, use parport_put_port().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) struct parport *parport_find_number(int number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	struct parport *port, *result = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (list_empty(&portlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		get_lowlevel_driver();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	spin_lock(&parportlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	list_for_each_entry(port, &portlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		if (port->number == number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			result = parport_get_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	spin_unlock(&parportlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) EXPORT_SYMBOL(parport_find_number);
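/*
 * Illustrative sketch, not part of this file: a caller that looks a port
 * up by number owns the reference it gets back and must drop it with
 * parport_put_port() when done.
 *
 *	struct parport *port = parport_find_number(0);
 *
 *	if (port) {
 *		...use the port, e.g. register a pardevice on it...
 *		parport_put_port(port);
 *	}
 */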
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  *	parport_find_base - find a parallel port by base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  *	@base: base I/O address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *	This returns the parallel port with the specified base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  *	address, or %NULL if there is none.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  *	There is an implicit parport_get_port() done already; to throw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  *	away the reference to the port that parport_find_base()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  *	gives you, use parport_put_port().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) struct parport *parport_find_base(unsigned long base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	struct parport *port, *result = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (list_empty(&portlist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		get_lowlevel_driver();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	spin_lock(&parportlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	list_for_each_entry(port, &portlist, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		if (port->base == base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			result = parport_get_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	spin_unlock(&parportlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) EXPORT_SYMBOL(parport_find_base);
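/*
 * Illustrative sketch, not part of this file: lookup by base address
 * follows the same reference-counting rule; 0x378 (the traditional LPT1
 * base) is used purely as an example value.
 *
 *	struct parport *port = parport_find_base(0x378);
 *
 *	if (port) {
 *		...use the port...
 *		parport_put_port(port);
 *	}
 */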
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  *	parport_claim - claim access to a parallel port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  *	@dev: pointer to structure representing a device on the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  *	This function will not block and so can be used from interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  *	context.  If parport_claim() succeeds in claiming access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  *	the port it returns zero and the port is available to use.  It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  *	may fail (returning non-zero) if the port is in use by another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  *	driver and that driver is not willing to relinquish control of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  *	the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) int parport_claim(struct pardevice *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	struct pardevice *oldcad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	struct parport *port = dev->port->physport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (port->cad == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	/* Preempt any current device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	write_lock_irqsave(&port->cad_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	oldcad = port->cad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (oldcad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		if (oldcad->preempt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			if (oldcad->preempt(oldcad->private))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 				goto blocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			port->ops->save_state(port, dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			goto blocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		if (port->cad != oldcad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			 * I think we'll actually deadlock rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			 * get here, but just in case..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			pr_warn("%s: %s released port when preempted!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				port->name, oldcad->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			if (port->cad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 				goto blocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/* Can't fail from now on, so mark ourselves as no longer waiting.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	if (dev->waiting & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		dev->waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		/* Take ourselves out of the wait list again.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		spin_lock_irq(&port->waitlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		if (dev->waitprev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			dev->waitprev->waitnext = dev->waitnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			port->waithead = dev->waitnext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (dev->waitnext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			dev->waitnext->waitprev = dev->waitprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			port->waittail = dev->waitprev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		spin_unlock_irq(&port->waitlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		dev->waitprev = dev->waitnext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	/* Now we do the change of devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	port->cad = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) #ifdef CONFIG_PARPORT_1284
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	/* If it's a mux port, select it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (dev->port->muxport >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		/* FIXME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		port->muxsel = dev->port->muxport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* If it's a daisy chain device, select it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	if (dev->daisy >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		/* This could be lazier. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		if (!parport_daisy_select(port, dev->daisy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 					   IEEE1284_MODE_COMPAT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			port->daisy = dev->daisy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #endif /* IEEE1284.3 support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	/* Restore control registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	port->ops->restore_state(port, dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	write_unlock_irqrestore(&port->cad_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	dev->time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) blocked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 * If this is the first time we tried to claim the port, register an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	 * interest.  This is only allowed for devices sleeping in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	 * parport_claim_or_block(), or those with a wakeup function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	/* The cad_lock is still held for writing here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (dev->waiting & 2 || dev->wakeup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		spin_lock(&port->waitlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		if (test_and_set_bit(0, &dev->waiting) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			/* First add ourselves to the end of the wait list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			dev->waitnext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			dev->waitprev = port->waittail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			if (port->waittail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				port->waittail->waitnext = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 				port->waittail = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 				port->waithead = port->waittail = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		spin_unlock(&port->waitlist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	write_unlock_irqrestore(&port->cad_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) EXPORT_SYMBOL(parport_claim);
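/*
 * Illustrative sketch, not part of this file: because parport_claim()
 * never sleeps, a driver may attempt it from atomic context and must be
 * ready to back off on -EAGAIN (its wakeup callback will be called once
 * the port becomes available).  "foo_pdev" is a hypothetical pardevice.
 *
 *	if (parport_claim(foo_pdev) == 0) {
 *		...non-blocking I/O on the port...
 *		parport_release(foo_pdev);
 *	} else {
 *		...port busy: defer the work until the wakeup callback runs...
 *	}
 */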
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  *	parport_claim_or_block - claim access to a parallel port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  *	@dev: pointer to structure representing a device on the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  *	This behaves like parport_claim(), but will block if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  *	to wait for the port to be free.  A return value of 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  *	indicates that it slept; 0 means that it succeeded without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *	needing to sleep.  A negative error code indicates failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int parport_claim_or_block(struct pardevice *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	 * Signal to parport_claim() that we can wait even without a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	 * wakeup function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	dev->waiting = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	/* Try to claim the port.  If this fails, we need to sleep.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	r = parport_claim(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (r == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) #ifdef PARPORT_DEBUG_SHARING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		       dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		 * FIXME!!! Use the proper locking for dev->waiting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		 * and make this use the "wait_event_interruptible()"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		 * interfaces. The cli/sti that used to be here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		 * did nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		 * See also parport_release()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		 * If dev->waiting is clear now, an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		 * gave us the port and we would deadlock if we slept.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		if (dev->waiting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			wait_event_interruptible(dev->wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 						 !dev->waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #ifdef PARPORT_DEBUG_SHARING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			       dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) #ifdef PARPORT_DEBUG_SHARING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		if (dev->port->physport->cad != dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			       dev->name, dev->port->physport->cad ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			       dev->port->physport->cad->name : "nobody");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	dev->waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) EXPORT_SYMBOL(parport_claim_or_block);
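/*
 * Illustrative sketch, not part of this file: a sleeping caller (a
 * write() path in a character driver, for instance) brackets a transfer
 * with a blocking claim and a release.  "foo_pdev" is a hypothetical
 * pardevice; a return value of 1 only means the caller had to sleep.
 *
 *	int ret = parport_claim_or_block(foo_pdev);
 *
 *	if (ret < 0)
 *		return ret;
 *	...perform the transfer...
 *	parport_release(foo_pdev);
 */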
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  *	parport_release - give up access to a parallel port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  *	@dev: pointer to structure representing parallel port device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  *	This function cannot fail, but it should not be called without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  *	the port claimed.  Similarly, if the port is already claimed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  *	you should not try claiming it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) void parport_release(struct pardevice *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	struct parport *port = dev->port->physport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	struct pardevice *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	/* Make sure that dev is the current device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	write_lock_irqsave(&port->cad_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (port->cad != dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		write_unlock_irqrestore(&port->cad_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		pr_warn("%s: %s tried to release parport when not owner\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			port->name, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) #ifdef CONFIG_PARPORT_1284
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	/* If this is on a mux port, deselect it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	if (dev->port->muxport >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		/* FIXME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		port->muxsel = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	/* If this is a daisy device, deselect it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (dev->daisy >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		parport_daisy_deselect_all(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		port->daisy = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	port->cad = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	write_unlock_irqrestore(&port->cad_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	/* Save control registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	port->ops->save_state(port, dev->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	 * If anybody is waiting, find out who's been there longest and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	 * then wake them up. (Note: no locking required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	/* !!! LOCKING IS NEEDED HERE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	for (pd = port->waithead; pd; pd = pd->waitnext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		if (pd->waiting & 2) { /* sleeping in claim_or_block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			parport_claim(pd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			if (waitqueue_active(&pd->wait_q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				wake_up_interruptible(&pd->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		} else if (pd->wakeup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			pd->wakeup(pd->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			if (dev->port->cad) /* racy but no matter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			pr_err("%s: don't know how to wake %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			       port->name, pd->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 * Nobody was waiting, so walk the list to see if anyone is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	 * interested in being woken up. (Note: no locking required)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	/* !!! LOCKING IS NEEDED HERE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		if (pd->wakeup && pd != dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			pd->wakeup(pd->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) EXPORT_SYMBOL(parport_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) irqreturn_t parport_irq_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	struct parport *port = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	parport_generic_irq(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) EXPORT_SYMBOL(parport_irq_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) MODULE_LICENSE("GPL");