share.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370
  1. /*
  2. * Parallel-port resource manager code.
  3. *
  4. * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
  5. * Tim Waugh <tim@cyberelk.demon.co.uk>
  6. * Jose Renau <renau@acm.org>
  7. * Philip Blundell <philb@gnu.org>
  8. * Andrea Arcangeli
  9. *
  10. * based on work by Grant Guenther <grant@torque.net>
  11. * and Philip Blundell
  12. *
  13. * Any part of this program may be used in documents licensed under
  14. * the GNU Free Documentation License, Version 1.1 or any later version
  15. * published by the Free Software Foundation.
  16. */
  17. #undef PARPORT_DEBUG_SHARING /* undef for production */
  18. #include <linux/module.h>
  19. #include <linux/string.h>
  20. #include <linux/threads.h>
  21. #include <linux/parport.h>
  22. #include <linux/delay.h>
  23. #include <linux/errno.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/ioport.h>
  26. #include <linux/kernel.h>
  27. #include <linux/slab.h>
  28. #include <linux/sched/signal.h>
  29. #include <linux/kmod.h>
  30. #include <linux/device.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/mutex.h>
  33. #include <asm/irq.h>
#undef PARPORT_PARANOID

/* Default timeslice a device may hold the port before being preemptable. */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Ports that have been announced to drivers; guarded by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Legacy (non device-model) drivers registered via attach/detach. */
static LIST_HEAD(drivers);

/* Serialises driver and port registration/unregistration. */
static DEFINE_MUTEX(registration_lock);
/* What you can do to a port that's gone away..
 * These no-op operations replace the lowlevel driver's ops in
 * parport_remove_port(), so stale users see harmless failures
 * instead of calling into unloaded code. */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
/* Operations table installed on a dead port: every entry points at a
 * do-nothing stub above. */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
/* Device type assigned to port devices (as opposed to pardevices)
 * registered on the parport bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};

/* Returns non-zero if @dev is a port device rather than a pardevice. */
static int is_parport(struct device *dev)
{
	return dev->type == &parport_device_type;
}
  90. static int parport_probe(struct device *dev)
  91. {
  92. struct parport_driver *drv;
  93. if (is_parport(dev))
  94. return -ENODEV;
  95. drv = to_parport_driver(dev->driver);
  96. if (!drv->probe) {
  97. /* if driver has not defined a custom probe */
  98. struct pardevice *par_dev = to_pardevice(dev);
  99. if (strcmp(par_dev->name, drv->name))
  100. return -ENODEV;
  101. return 0;
  102. }
  103. /* if driver defined its own probe */
  104. return drv->probe(to_pardevice(dev));
  105. }
/* The parport bus: hosts both port devices and pardevices. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
/* Register the parport bus with the driver core; called at module init. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}

/* Tear down the parport bus; called at module exit. */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
  118. /*
  119. * iterates through all the drivers registered with the bus and sends the port
  120. * details to the match_port callback of the driver, so that the driver can
  121. * know about the new port that just registered with the bus and decide if it
  122. * wants to use this new port.
  123. */
  124. static int driver_check(struct device_driver *dev_drv, void *_port)
  125. {
  126. struct parport *port = _port;
  127. struct parport_driver *drv = to_parport_driver(dev_drv);
  128. if (drv->match_port)
  129. drv->match_port(port);
  130. return 0;
  131. }
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	/* Legacy drivers first: attach() is mandatory for them. */
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * call the driver_check function of the drivers registered in
	 * new device model
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
  145. static int driver_detach(struct device_driver *_drv, void *_port)
  146. {
  147. struct parport *port = _port;
  148. struct parport_driver *drv = to_parport_driver(_drv);
  149. if (drv->detach)
  150. drv->detach(port);
  151. return 0;
  152. }
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * call the detach function of the drivers registered in
	 * new device model
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}
  175. /*
  176. * iterates through all the devices connected to the bus and sends the device
  177. * details to the match_port callback of the driver, so that the driver can
  178. * know what are all the ports that are connected to the bus and choose the
  179. * port to which it wants to register its device.
  180. */
  181. static int port_check(struct device *dev, void *dev_drv)
  182. {
  183. struct parport_driver *drv = dev_drv;
  184. /* only send ports, do not send other devices connected to bus */
  185. if (is_parport(dev))
  186. drv->match_port(to_parport_dev(dev));
  187. return 0;
  188. }
  189. /**
  190. * parport_register_driver - register a parallel port device driver
  191. * @drv: structure describing the driver
  192. * @owner: owner module of drv
  193. * @mod_name: module name string
  194. *
  195. * This can be called by a parallel port device driver in order
  196. * to receive notifications about ports being found in the
  197. * system, as well as ports no longer available.
  198. *
  199. * If devmodel is true then the new device model is used
  200. * for registration.
  201. *
  202. * The @drv structure is allocated by the caller and must not be
  203. * deallocated until after calling parport_unregister_driver().
  204. *
  205. * If using the non device model:
  206. * The driver's attach() function may block. The port that
  207. * attach() is given will be valid for the duration of the
  208. * callback, but if the driver wants to take a copy of the
  209. * pointer it must call parport_get_port() to do so. Calling
  210. * parport_register_device() on that port will do this for you.
  211. *
  212. * The driver's detach() function may block. The port that
  213. * detach() is given will be valid for the duration of the
  214. * callback, but if the driver wants to take a copy of the
  215. * pointer it must call parport_get_port() to do so.
  216. *
  217. *
  218. * Returns 0 on success. The non device model will always succeeds.
  219. * but the new device model can fail and will return the error code.
  220. **/
  221. int __parport_register_driver(struct parport_driver *drv, struct module *owner,
  222. const char *mod_name)
  223. {
  224. if (list_empty(&portlist))
  225. get_lowlevel_driver();
  226. if (drv->devmodel) {
  227. /* using device model */
  228. int ret;
  229. /* initialize common driver fields */
  230. drv->driver.name = drv->name;
  231. drv->driver.bus = &parport_bus_type;
  232. drv->driver.owner = owner;
  233. drv->driver.mod_name = mod_name;
  234. ret = driver_register(&drv->driver);
  235. if (ret)
  236. return ret;
  237. mutex_lock(&registration_lock);
  238. if (drv->match_port)
  239. bus_for_each_dev(&parport_bus_type, NULL, drv,
  240. port_check);
  241. mutex_unlock(&registration_lock);
  242. } else {
  243. struct parport *port;
  244. drv->devmodel = false;
  245. mutex_lock(&registration_lock);
  246. list_for_each_entry(port, &portlist, list)
  247. drv->attach(port);
  248. list_add(&drv->list, &drivers);
  249. mutex_unlock(&registration_lock);
  250. }
  251. return 0;
  252. }
  253. EXPORT_SYMBOL(__parport_register_driver);
  254. static int port_detach(struct device *dev, void *_drv)
  255. {
  256. struct parport_driver *drv = _drv;
  257. if (is_parport(dev) && drv->detach)
  258. drv->detach(to_parport_dev(dev));
  259. return 0;
  260. }
/**
 * parport_unregister_driver - deregister a parallel port device driver
 * @drv: structure describing the driver that was given to
 *       parport_register_driver()
 *
 * This should be called by a parallel port device driver that
 * has registered itself using parport_register_driver() when it
 * is about to be unloaded.
 *
 * When it returns, the driver's attach() routine will no longer
 * be called, and for each port that attach() was called for, the
 * detach() routine will have been called.
 *
 * All the driver's attach() and detach() calls are guaranteed to have
 * finished by the time this function returns.
 **/
void parport_unregister_driver(struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	if (drv->devmodel) {
		/* Detach from every port device, then drop the driver. */
		bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
		driver_unregister(&drv->driver);
	} else {
		/* Legacy path: unlist first so no new attach() can race,
		 * then detach from every announced port. */
		list_del_init(&drv->list);
		list_for_each_entry(port, &portlist, list)
			drv->detach(port);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
  292. static void free_port(struct device *dev)
  293. {
  294. int d;
  295. struct parport *port = to_parport_dev(dev);
  296. spin_lock(&full_list_lock);
  297. list_del(&port->full_list);
  298. spin_unlock(&full_list_lock);
  299. for (d = 0; d < 5; d++) {
  300. kfree(port->probe_info[d].class_name);
  301. kfree(port->probe_info[d].mfr);
  302. kfree(port->probe_info[d].model);
  303. kfree(port->probe_info[d].cmdset);
  304. kfree(port->probe_info[d].description);
  305. }
  306. kfree(port->name);
  307. kfree(port);
  308. }
/**
 * parport_get_port - increment a port's reference count
 * @port: the port
 *
 * This ensures that a struct parport pointer remains valid
 * until the matching parport_put_port() call.
 **/
struct parport *parport_get_port(struct parport *port)
{
	/* Refcounting is delegated to the embedded struct device. */
	struct device *dev = get_device(&port->bus_dev);

	return to_parport_dev(dev);
}
EXPORT_SYMBOL(parport_get_port);
/**
 * parport_del_port - remove a port device from the bus
 * @port: the port
 *
 * Unregisters the port's embedded device; the final reference drop
 * (see parport_put_port()) then triggers free_port().
 **/
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
/**
 * parport_put_port - decrement a port's reference count
 * @port: the port
 *
 * This should be called once for each call to parport_get_port(),
 * once the port is no longer needed. When the reference count reaches
 * zero (port is no longer used), free_port is called.
 **/
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
  340. /**
  341. * parport_register_port - register a parallel port
  342. * @base: base I/O address
  343. * @irq: IRQ line
  344. * @dma: DMA channel
  345. * @ops: pointer to the port driver's port operations structure
  346. *
  347. * When a parallel port (lowlevel) driver finds a port that
  348. * should be made available to parallel port device drivers, it
  349. * should call parport_register_port(). The @base, @irq, and
  350. * @dma parameters are for the convenience of port drivers, and
  351. * for ports where they aren't meaningful needn't be set to
  352. * anything special. They can be altered afterwards by adjusting
  353. * the relevant members of the parport structure that is returned
  354. * and represents the port. They should not be tampered with
  355. * after calling parport_announce_port, however.
  356. *
  357. * If there are parallel port device drivers in the system that
  358. * have registered themselves using parport_register_driver(),
  359. * they are not told about the port at this time; that is done by
  360. * parport_announce_port().
  361. *
  362. * The @ops structure is allocated by the caller, and must not be
  363. * deallocated before calling parport_remove_port().
  364. *
  365. * If there is no memory to allocate a new parport structure,
  366. * this function will return %NULL.
  367. **/
  368. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  369. struct parport_operations *ops)
  370. {
  371. struct list_head *l;
  372. struct parport *tmp;
  373. int num;
  374. int device;
  375. char *name;
  376. int ret;
  377. tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
  378. if (!tmp)
  379. return NULL;
  380. /* Init our structure */
  381. tmp->base = base;
  382. tmp->irq = irq;
  383. tmp->dma = dma;
  384. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  385. tmp->modes = 0;
  386. INIT_LIST_HEAD(&tmp->list);
  387. tmp->devices = tmp->cad = NULL;
  388. tmp->flags = 0;
  389. tmp->ops = ops;
  390. tmp->physport = tmp;
  391. memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
  392. rwlock_init(&tmp->cad_lock);
  393. spin_lock_init(&tmp->waitlist_lock);
  394. spin_lock_init(&tmp->pardevice_lock);
  395. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  396. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  397. sema_init(&tmp->ieee1284.irq, 0);
  398. tmp->spintime = parport_default_spintime;
  399. atomic_set(&tmp->ref_count, 1);
  400. INIT_LIST_HEAD(&tmp->full_list);
  401. name = kmalloc(15, GFP_KERNEL);
  402. if (!name) {
  403. kfree(tmp);
  404. return NULL;
  405. }
  406. /* Search for the lowest free parport number. */
  407. spin_lock(&full_list_lock);
  408. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  409. struct parport *p = list_entry(l, struct parport, full_list);
  410. if (p->number != num)
  411. break;
  412. }
  413. tmp->portnum = tmp->number = num;
  414. list_add_tail(&tmp->full_list, l);
  415. spin_unlock(&full_list_lock);
  416. /*
  417. * Now that the portnum is known finish doing the Init.
  418. */
  419. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  420. tmp->name = name;
  421. tmp->bus_dev.bus = &parport_bus_type;
  422. tmp->bus_dev.release = free_port;
  423. dev_set_name(&tmp->bus_dev, name);
  424. tmp->bus_dev.type = &parport_device_type;
  425. for (device = 0; device < 5; device++)
  426. /* assume the worst */
  427. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  428. tmp->waithead = tmp->waittail = NULL;
  429. ret = device_register(&tmp->bus_dev);
  430. if (ret) {
  431. put_device(&tmp->bus_dev);
  432. return NULL;
  433. }
  434. return tmp;
  435. }
  436. EXPORT_SYMBOL(parport_register_port);
/**
 * parport_announce_port - tell device drivers about a parallel port
 * @port: parallel port to announce
 *
 * After a port driver has registered a parallel port with
 * parport_register_port, and performed any necessary
 * initialisation or adjustments, it should call
 * parport_announce_port() in order to notify all device drivers
 * that have called parport_register_driver().  Their attach()
 * functions will be called, with @port as the parameter.
 **/
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
		       port->name);

	parport_proc_register(port);

	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] - presumably mux slave ports; announced alongside
	 * the master.  TODO(review): confirm against the mux code. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
/**
 * parport_remove_port - deregister a parallel port
 * @port: parallel port to deregister
 *
 * When a parallel port driver is forcibly unloaded, or a
 * parallel port becomes inaccessible, the port driver must call
 * this function in order to deal with device drivers that still
 * want to use it.
 *
 * The parport structure associated with the port has its
 * operations structure replaced with one containing 'null'
 * operations that return errors or just don't do anything.
 *
 * Any drivers that have registered themselves using
 * parport_register_driver() are notified that the port is no
 * longer accessible by having their detach() routines called
 * with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* Anybody still holding a pointer now only reaches the
	 * harmless no-op operations in dead_ops. */
	port->ops = &dead_ops;

	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop the reference held on each slave port. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);
/**
 * parport_register_device - register a device on a parallel port
 * @port: port to which the device is attached
 * @name: a name to refer to the device
 * @pf: preemption callback
 * @kf: kick callback (wake-up)
 * @irq_func: interrupt handler
 * @flags: registration flags
 * @handle: data for callback functions
 *
 * This function, called by parallel port device drivers,
 * declares that a device is connected to a port, and tells the
 * system all it needs to know.
 *
 * The @name is allocated by the caller and must not be
 * deallocated until the caller calls @parport_unregister_device
 * for that device.
 *
 * The preemption callback function, @pf, is called when this
 * device driver has claimed access to the port but another
 * device driver wants to use it.  It is given @handle as its
 * parameter, and should return zero if it is willing for the
 * system to release the port to another driver on its behalf.
 * If it wants to keep control of the port it should return
 * non-zero, and no action will be taken.  It is good manners for
 * the driver to try to release the port at the earliest
 * opportunity after its preemption callback rejects a preemption
 * attempt.  Note that if a preemption callback is happy for
 * preemption to go ahead, there is no need to release the port;
 * it is done automatically.  This function may not block, as it
 * may be called from interrupt context.  If the device driver
 * does not support preemption, @pf can be %NULL.
 *
 * The wake-up ("kick") callback function, @kf, is called when
 * the port is available to be claimed for exclusive access; that
 * is, parport_claim() is guaranteed to succeed when called from
 * inside the wake-up callback function.  If the driver wants to
 * claim the port it should do so; otherwise, it need not take
 * any action.  This function may not block, as it may be called
 * from interrupt context.  If the device driver does not want to
 * be explicitly invited to claim the port in this way, @kf can
 * be %NULL.
 *
 * The interrupt handler, @irq_func, is called when an interrupt
 * arrives from the parallel port.  Note that if a device driver
 * wants to use interrupts it should use parport_enable_irq(),
 * and can also check the irq member of the parport structure
 * representing the port.
 *
 * The parallel port (lowlevel) driver is the one that has called
 * request_irq() and whose interrupt handler is called first.
 * This handler does whatever needs to be done to the hardware to
 * acknowledge the interrupt (for PC-style ports there is nothing
 * special to be done).  It then tells the IEEE 1284 code about
 * the interrupt, which may involve reacting to an IEEE 1284
 * event depending on the current IEEE 1284 phase.  After this,
 * it calls @irq_func.  Needless to say, @irq_func will be called
 * from interrupt context, and may not block.
 *
 * The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 * so should only be used when sharing the port with other device
 * drivers is impossible and would lead to incorrect behaviour.
 * Use it sparingly!  Normally, @flags will be zero.
 *
 * This function returns a pointer to a structure that represents
 * the device on the port, or %NULL if there is not enough memory
 * to allocate space for that structure.
 **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk(KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		/* A lurking device must supply both callbacks. */
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	if (flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * We up our own module reference count, and that of the port
	 * on which a device is to be registered, to ensure that
	 * neither of us gets unloaded while we sleep in (e.g.)
	 * kmalloc.
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (!tmp)
		goto out;

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (!tmp->state)
		goto out_free_pardevice;

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;
	tmp->devmodel = false;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Re-check under the lock: another device may have
		 * registered since the unlocked check above. */
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			printk(KERN_DEBUG
			       "%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb();	/*
		 * Make sure that tmp->next is written before it's
		 * added to the list; see comments marked 'no locking
		 * required'
		 */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);

	/* The first device on the port also becomes its proc device. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	parport_put_port(port);
	module_put(port->ops->owner);
	return NULL;
}
EXPORT_SYMBOL(parport_register_device);
  707. static void free_pardevice(struct device *dev)
  708. {
  709. struct pardevice *par_dev = to_pardevice(dev);
  710. kfree(par_dev->name);
  711. kfree(par_dev);
  712. }
/**
 * parport_register_dev_model - register a device on a parallel port
 * @port: port on which the device sits
 * @name: name of the device (copied; caller keeps ownership of @name)
 * @par_dev_cb: callbacks (preempt/wakeup/irq_func), private data and flags
 * @id: instance number used to build the child device name "%s.%d"
 *
 * Device-model counterpart of parport_register_device(): allocates a
 * struct pardevice, registers a child struct device on the parport bus
 * and links the pardevice onto the physical port's device list.
 *
 * Returns the new pardevice, or NULL on any failure (allocation,
 * exclusive-access conflict, or driver-core registration error).
 */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	/*
	 * Cheap unlocked check; the PARPORT_DEV_EXCL case is re-checked
	 * under pardevice_lock further down.
	 */
	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	/* A lurking device must be preemptible and wakeable by callback. */
	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/*
	 * Pin the low-level driver module and the port for the lifetime of
	 * the device; both references are dropped on the error paths below
	 * and in parport_unregister_device().
	 */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;		/* not a daisy-chain device */
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;	/* default transfer timeout: 5 s */

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/*
		 * Once device_register() has been called, the driver core
		 * owns the embedded struct device: free state by hand and
		 * drop the reference so free_pardevice() releases name and
		 * the pardevice itself.
		 */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		/* Re-check under the lock: a device may have raced in. */
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that par_dev->next is written before it's
		 * added to the list; see comments marked 'no locking
		 * required'
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);
	/* The first registered device also backs the port's proc entry. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}
	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* devmodel is still false on these paths, so we own par_dev. */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
/**
 * parport_unregister_device - deregister a device on a parallel port
 * @dev: pointer to structure representing device
 *
 * This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	/* If this device backed the port's proc entry, tear it down. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* Force-release the port if the driver forgot to. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	/* Unlink from the port's device list (doubly linked, no list_head). */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* An exclusive device leaving re-opens the port for others. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	/*
	 * Device-model pardevices are freed via free_pardevice() when the
	 * last struct device reference drops; legacy ones we free directly.
	 */
	if (dev->devmodel)
		device_unregister(&dev->dev);
	else
		kfree(dev);

	/* Drop the references taken at registration time. */
	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
  896. /**
  897. * parport_find_number - find a parallel port by number
  898. * @number: parallel port number
  899. *
  900. * This returns the parallel port with the specified number, or
  901. * %NULL if there is none.
  902. *
  903. * There is an implicit parport_get_port() done already; to throw
  904. * away the reference to the port that parport_find_number()
  905. * gives you, use parport_put_port().
  906. */
  907. struct parport *parport_find_number(int number)
  908. {
  909. struct parport *port, *result = NULL;
  910. if (list_empty(&portlist))
  911. get_lowlevel_driver();
  912. spin_lock(&parportlist_lock);
  913. list_for_each_entry(port, &portlist, list) {
  914. if (port->number == number) {
  915. result = parport_get_port(port);
  916. break;
  917. }
  918. }
  919. spin_unlock(&parportlist_lock);
  920. return result;
  921. }
  922. EXPORT_SYMBOL(parport_find_number);
  923. /**
  924. * parport_find_base - find a parallel port by base address
  925. * @base: base I/O address
  926. *
  927. * This returns the parallel port with the specified base
  928. * address, or %NULL if there is none.
  929. *
  930. * There is an implicit parport_get_port() done already; to throw
  931. * away the reference to the port that parport_find_base()
  932. * gives you, use parport_put_port().
  933. */
  934. struct parport *parport_find_base(unsigned long base)
  935. {
  936. struct parport *port, *result = NULL;
  937. if (list_empty(&portlist))
  938. get_lowlevel_driver();
  939. spin_lock(&parportlist_lock);
  940. list_for_each_entry(port, &portlist, list) {
  941. if (port->base == base) {
  942. result = parport_get_port(port);
  943. break;
  944. }
  945. }
  946. spin_unlock(&parportlist_lock);
  947. return result;
  948. }
  949. EXPORT_SYMBOL(parport_find_base);
/**
 * parport_claim - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This function will not block and so can be used from interrupt
 * context. If parport_claim() succeeds in claiming access to
 * the port it returns zero and the port is available to use. It
 * may fail (returning non-zero) if the port is in use by another
 * driver and that driver is not willing to relinquish control of
 * the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	/* Already the current-active-device? Nothing to do. */
	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			/* Non-zero preempt() means the owner refuses to go. */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			/* No preempt callback: the owner cannot be evicted. */
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest. This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
/**
 * parport_claim_or_block - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This behaves like parport_claim(), but will block if necessary
 * to wait for the port to be free. A return value of 1
 * indicates that it slept; 0 means that it succeeded without
 * needing to sleep. A negative error code indicates failure.
 **/
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port. If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
		       dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			/* Woken by a signal rather than by getting the port. */
			if (signal_pending(current))
				return -EINTR;
			r = 1;	/* we slept */
		} else {
			r = 0;	/* the port was handed over before we slept */
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name : "nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);
/**
 * parport_release - give up access to a parallel port device
 * @dev: pointer to structure representing parallel port device
 *
 * This function cannot fail, but it should not be called without
 * the port claimed. Similarly, if the port is already claimed
 * you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
		       port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Hand the port over, then wake the sleeper. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Let the driver claim the port from its callback. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n",
			       port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
  1181. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  1182. {
  1183. struct parport *port = dev_id;
  1184. parport_generic_irq(port);
  1185. return IRQ_HANDLED;
  1186. }
  1187. EXPORT_SYMBOL(parport_irq_handler);
  1188. MODULE_LICENSE("GPL");