/* drivers/parport/share.c */
  1. /*
  2. * Parallel-port resource manager code.
  3. *
  4. * Authors: David Campbell <[email protected]>
  5. * Tim Waugh <[email protected]>
  6. * Jose Renau <[email protected]>
  7. * Philip Blundell <[email protected]>
  8. * Andrea Arcangeli
  9. *
  10. * based on work by Grant Guenther <[email protected]>
  11. * and Philip Blundell
  12. *
  13. * Any part of this program may be used in documents licensed under
  14. * the GNU Free Documentation License, Version 1.1 or any later version
  15. * published by the Free Software Foundation.
  16. */
  17. #undef PARPORT_DEBUG_SHARING /* undef for production */
  18. #include <linux/module.h>
  19. #include <linux/string.h>
  20. #include <linux/threads.h>
  21. #include <linux/parport.h>
  22. #include <linux/delay.h>
  23. #include <linux/errno.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/ioport.h>
  26. #include <linux/kernel.h>
  27. #include <linux/slab.h>
  28. #include <linux/sched/signal.h>
  29. #include <linux/kmod.h>
  30. #include <linux/device.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/mutex.h>
  33. #include <asm/irq.h>
  34. #undef PARPORT_PARANOID
  35. #define PARPORT_DEFAULT_TIMESLICE (HZ/5)
  36. unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
  37. int parport_default_spintime = DEFAULT_SPIN_TIME;
  38. static LIST_HEAD(portlist);
  39. static DEFINE_SPINLOCK(parportlist_lock);
  40. /* list of all allocated ports, sorted by ->number */
  41. static LIST_HEAD(all_ports);
  42. static DEFINE_SPINLOCK(full_list_lock);
  43. static LIST_HEAD(drivers);
  44. static DEFINE_MUTEX(registration_lock);
  45. /* What you can do to a port that's gone away.. */
/*
 * No-op implementations installed (via dead_ops below) once a port's
 * lowlevel driver has gone away, so that stale callers fail harmlessly:
 * reads return 0, writes and state changes are silently discarded.
 */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			 unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
/*
 * Operations vector substituted by parport_remove_port(); every entry
 * points at one of the dead_* stubs above.
 */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
/* Device type tag used to tell ports apart from pardevices on the bus. */
static struct device_type parport_device_type = {
	.name = "parport",
};
  86. static int is_parport(struct device *dev)
  87. {
  88. return dev->type == &parport_device_type;
  89. }
  90. static int parport_probe(struct device *dev)
  91. {
  92. struct parport_driver *drv;
  93. if (is_parport(dev))
  94. return -ENODEV;
  95. drv = to_parport_driver(dev->driver);
  96. if (!drv->probe) {
  97. /* if driver has not defined a custom probe */
  98. struct pardevice *par_dev = to_pardevice(dev);
  99. if (strcmp(par_dev->name, drv->name))
  100. return -ENODEV;
  101. return 0;
  102. }
  103. /* if driver defined its own probe */
  104. return drv->probe(to_pardevice(dev));
  105. }
/* The parport bus in the driver model; probe matches drivers to devices. */
static struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};
/* Register the parport bus type with the driver core. */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}

/* Counterpart of parport_bus_init(): unregister the bus type. */
void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}
  118. /*
  119. * iterates through all the drivers registered with the bus and sends the port
  120. * details to the match_port callback of the driver, so that the driver can
  121. * know about the new port that just registered with the bus and decide if it
  122. * wants to use this new port.
  123. */
  124. static int driver_check(struct device_driver *dev_drv, void *_port)
  125. {
  126. struct parport *port = _port;
  127. struct parport_driver *drv = to_parport_driver(dev_drv);
  128. if (drv->match_port)
  129. drv->match_port(port);
  130. return 0;
  131. }
/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */
	struct parport_driver *drv;

	/* legacy drivers on the private list get attach() directly */
	list_for_each_entry(drv, &drivers, list)
		drv->attach(port);

	/*
	 * call the driver_check function of the drivers registered in
	 * the new device model
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}
  145. static int driver_detach(struct device_driver *_drv, void *_port)
  146. {
  147. struct parport *port = _port;
  148. struct parport_driver *drv = to_parport_driver(_drv);
  149. if (drv->detach)
  150. drv->detach(port);
  151. return 0;
  152. }
/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	struct parport_driver *drv;

	/* caller has exclusive registration_lock */
	list_for_each_entry(drv, &drivers, list)
		drv->detach(port);

	/*
	 * call the detach function of the drivers registered in
	 * the new device model
	 */
	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils so that "parport_lowlevel"
	 * resolves to the platform's real lowlevel driver.
	 */
	request_module("parport_lowlevel");
}
  175. /*
  176. * iterates through all the devices connected to the bus and sends the device
  177. * details to the match_port callback of the driver, so that the driver can
  178. * know what are all the ports that are connected to the bus and choose the
  179. * port to which it wants to register its device.
  180. */
  181. static int port_check(struct device *dev, void *dev_drv)
  182. {
  183. struct parport_driver *drv = dev_drv;
  184. /* only send ports, do not send other devices connected to bus */
  185. if (is_parport(dev))
  186. drv->match_port(to_parport_dev(dev));
  187. return 0;
  188. }
  189. /*
  190. * Iterates through all the devices connected to the bus and return 1
  191. * if the device is a parallel port.
  192. */
  193. static int port_detect(struct device *dev, void *dev_drv)
  194. {
  195. if (is_parport(dev))
  196. return 1;
  197. return 0;
  198. }
/**
 *	__parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *	@owner: owner module of drv
 *	@mod_name: module name string
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *	Returns 0 on success; driver-core registration can fail, in
 *	which case the error code is returned.
 **/
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
			      const char *mod_name)
{
	int ret;

	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &parport_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	ret = driver_register(&drv->driver);
	if (ret)
		return ret;

	/*
	 * check if bus has any parallel port registered, if
	 * none is found then load the lowlevel driver.
	 */
	ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
			       port_detect);
	if (!ret)
		get_lowlevel_driver();

	/* offer every existing port to the new driver */
	mutex_lock(&registration_lock);
	if (drv->match_port)
		bus_for_each_dev(&parport_bus_type, NULL, drv,
				 port_check);
	mutex_unlock(&registration_lock);

	return 0;
}
EXPORT_SYMBOL(__parport_register_driver);
  260. static int port_detach(struct device *dev, void *_drv)
  261. {
  262. struct parport_driver *drv = _drv;
  263. if (is_parport(dev) && drv->detach)
  264. drv->detach(to_parport_dev(dev));
  265. return 0;
  266. }
/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/
void parport_unregister_driver(struct parport_driver *drv)
{
	/* registration_lock serialises against port (de)registration */
	mutex_lock(&registration_lock);
	bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
	driver_unregister(&drv->driver);
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
  291. static void free_port(struct device *dev)
  292. {
  293. int d;
  294. struct parport *port = to_parport_dev(dev);
  295. spin_lock(&full_list_lock);
  296. list_del(&port->full_list);
  297. spin_unlock(&full_list_lock);
  298. for (d = 0; d < 5; d++) {
  299. kfree(port->probe_info[d].class_name);
  300. kfree(port->probe_info[d].mfr);
  301. kfree(port->probe_info[d].model);
  302. kfree(port->probe_info[d].cmdset);
  303. kfree(port->probe_info[d].description);
  304. }
  305. kfree(port->name);
  306. kfree(port);
  307. }
/**
 *	parport_get_port - increment a port's reference count
 *	@port: the port
 *
 *	This ensures that a struct parport pointer remains valid
 *	until the matching parport_put_port() call.
 **/
struct parport *parport_get_port(struct parport *port)
{
	/* piggy-back on the embedded device's refcount */
	struct device *dev = get_device(&port->bus_dev);

	return to_parport_dev(dev);
}
EXPORT_SYMBOL(parport_get_port);
/**
 *	parport_del_port - remove a port's device from the bus
 *	@port: the port to remove
 *
 *	Unregisters the bus device embedded in @port.  The port itself is
 *	freed later, by free_port(), once the last reference is dropped.
 **/
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);
/**
 *	parport_put_port - decrement a port's reference count
 *	@port: the port
 *
 *	This should be called once for each call to parport_get_port(),
 *	once the port is no longer needed. When the reference count reaches
 *	zero (port is no longer used), free_port is called.
 **/
void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);
  339. /**
  340. * parport_register_port - register a parallel port
  341. * @base: base I/O address
  342. * @irq: IRQ line
  343. * @dma: DMA channel
  344. * @ops: pointer to the port driver's port operations structure
  345. *
  346. * When a parallel port (lowlevel) driver finds a port that
  347. * should be made available to parallel port device drivers, it
  348. * should call parport_register_port(). The @base, @irq, and
  349. * @dma parameters are for the convenience of port drivers, and
  350. * for ports where they aren't meaningful needn't be set to
  351. * anything special. They can be altered afterwards by adjusting
  352. * the relevant members of the parport structure that is returned
  353. * and represents the port. They should not be tampered with
  354. * after calling parport_announce_port, however.
  355. *
  356. * If there are parallel port device drivers in the system that
  357. * have registered themselves using parport_register_driver(),
  358. * they are not told about the port at this time; that is done by
  359. * parport_announce_port().
  360. *
  361. * The @ops structure is allocated by the caller, and must not be
  362. * deallocated before calling parport_remove_port().
  363. *
  364. * If there is no memory to allocate a new parport structure,
  365. * this function will return %NULL.
  366. **/
  367. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  368. struct parport_operations *ops)
  369. {
  370. struct list_head *l;
  371. struct parport *tmp;
  372. int num;
  373. int device;
  374. char *name;
  375. int ret;
  376. tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
  377. if (!tmp)
  378. return NULL;
  379. /* Init our structure */
  380. tmp->base = base;
  381. tmp->irq = irq;
  382. tmp->dma = dma;
  383. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  384. tmp->modes = 0;
  385. INIT_LIST_HEAD(&tmp->list);
  386. tmp->devices = tmp->cad = NULL;
  387. tmp->flags = 0;
  388. tmp->ops = ops;
  389. tmp->physport = tmp;
  390. memset(tmp->probe_info, 0, 5 * sizeof(struct parport_device_info));
  391. rwlock_init(&tmp->cad_lock);
  392. spin_lock_init(&tmp->waitlist_lock);
  393. spin_lock_init(&tmp->pardevice_lock);
  394. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  395. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  396. sema_init(&tmp->ieee1284.irq, 0);
  397. tmp->spintime = parport_default_spintime;
  398. atomic_set(&tmp->ref_count, 1);
  399. INIT_LIST_HEAD(&tmp->full_list);
  400. name = kmalloc(15, GFP_KERNEL);
  401. if (!name) {
  402. kfree(tmp);
  403. return NULL;
  404. }
  405. /* Search for the lowest free parport number. */
  406. spin_lock(&full_list_lock);
  407. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  408. struct parport *p = list_entry(l, struct parport, full_list);
  409. if (p->number != num)
  410. break;
  411. }
  412. tmp->portnum = tmp->number = num;
  413. list_add_tail(&tmp->full_list, l);
  414. spin_unlock(&full_list_lock);
  415. /*
  416. * Now that the portnum is known finish doing the Init.
  417. */
  418. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  419. tmp->name = name;
  420. tmp->bus_dev.bus = &parport_bus_type;
  421. tmp->bus_dev.release = free_port;
  422. dev_set_name(&tmp->bus_dev, name);
  423. tmp->bus_dev.type = &parport_device_type;
  424. for (device = 0; device < 5; device++)
  425. /* assume the worst */
  426. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  427. tmp->waithead = tmp->waittail = NULL;
  428. ret = device_register(&tmp->bus_dev);
  429. if (ret) {
  430. put_device(&tmp->bus_dev);
  431. return NULL;
  432. }
  433. return tmp;
  434. }
  435. EXPORT_SYMBOL(parport_register_port);
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		pr_warn("%s: fix this legacy no-device port driver!\n",
			port->name);

	parport_proc_register(port);

	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] — presumably mux slave ports; add them as well */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* stale users now hit harmless no-op operations */
	port->ops = &dead_ops;

	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* drop one reference to each slave port */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);
/*
 * Release callback for a pardevice's struct device: frees the name and
 * the pardevice itself.  par_dev->state is freed separately, by the
 * registration error paths and by parport_unregister_device().
 */
static void free_pardevice(struct device *dev)
{
	struct pardevice *par_dev = to_pardevice(dev);

	kfree(par_dev->name);
	kfree(par_dev);
}
/**
 *	parport_register_dev_model - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@par_dev_cb: struct containing callbacks
 *	@id: device number to be given to the device
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The struct pardev_cb contains pointers to callbacks.  The
 *	preemption callback function, @preempt, is called when this
 *	device driver has claimed access to the port but another device
 *	driver wants to use it.  It is given, @private, as its parameter,
 *	and should return zero if it is willing for the system to release
 *	the port to another driver on its behalf.  If it wants to keep
 *	control of the port it should return non-zero, and no action will
 *	be taken.  It is good manners for the driver to try to release the
 *	port at the earliest opportunity after its preemption callback
 *	rejects a preemption attempt.  Note that if a preemption callback
 *	is happy for preemption to go ahead, there is no need to release
 *	the port; it is done automatically.  This function may not block,
 *	as it may be called from interrupt context.  If the device driver
 *	does not support preemption, @preempt can be %NULL.
 *
 *	The wake-up ("kick") callback function, @wakeup, is called when
 *	the port is available to be claimed for exclusive access; that
 *	is, parport_claim() is guaranteed to succeed when called from
 *	inside the wake-up callback function.  If the driver wants to
 *	claim the port it should do so; otherwise, it need not take
 *	any action.  This function may not block, as it may be called
 *	from interrupt context.  If the device driver does not want to
 *	be explicitly invited to claim the port in this way, @wakeup can
 *	be %NULL.
 *
 *	The interrupt handler, @irq_func, is called when an interrupt
 *	arrives from the parallel port.  Note that if a device driver
 *	wants to use interrupts it should use parport_enable_irq(),
 *	and can also check the irq member of the parport structure
 *	representing the port.
 *
 *	The parallel port (lowlevel) driver is the one that has called
 *	request_irq() and whose interrupt handler is called first.
 *	This handler does whatever needs to be done to the hardware to
 *	acknowledge the interrupt (for PC-style ports there is nothing
 *	special to be done).  It then tells the IEEE 1284 code about
 *	the interrupt, which may involve reacting to an IEEE 1284
 *	event depending on the current IEEE 1284 phase.  After this,
 *	it calls @irq_func.  Needless to say, @irq_func will be called
 *	from interrupt context, and may not block.
 *
 *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 *	so should only be used when sharing the port with other device
 *	drivers is impossible and would lead to incorrect behaviour.
 *	Use it sparingly!  Normally, @flags will be zero.
 *
 *	This function returns a pointer to a structure that represents
 *	the device on the port, or %NULL if there is not enough memory
 *	to allocate space for that structure.
 **/
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	int ret;
	char *devname;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	/* lurking devices must be preemptable and wakeable */
	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	/* early, unlocked check; re-checked under pardevice_lock below */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/* pin the lowlevel driver module and the port for this device */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		/* release callback frees name+par_dev; state is ours */
		kfree(par_dev->state);
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* re-check exclusivity now that we hold pardevice_lock */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that tmp->next is written before it's
		 * added to the list; see comments marked 'no locking
		 * required'
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);

	/* first device registered owns the port's /proc entry */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}
	return par_dev;

err_free_devname:
	kfree(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	/* devmodel is still false on these paths, so free directly */
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);
	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		pr_err("%s: passed NULL\n", __func__);
		return;
	}
#endif

	port = dev->port->physport;

	/* if this device owned the port's /proc entry, tear it down */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* force a release if the driver forgot to do it itself */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	/* unlink from the port's doubly-linked device list */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	/* name and dev itself are freed by free_pardevice() on release */
	kfree(dev->state);
	device_unregister(&dev->dev);

	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);
  777. /**
  778. * parport_find_number - find a parallel port by number
  779. * @number: parallel port number
  780. *
  781. * This returns the parallel port with the specified number, or
  782. * %NULL if there is none.
  783. *
  784. * There is an implicit parport_get_port() done already; to throw
  785. * away the reference to the port that parport_find_number()
  786. * gives you, use parport_put_port().
  787. */
  788. struct parport *parport_find_number(int number)
  789. {
  790. struct parport *port, *result = NULL;
  791. if (list_empty(&portlist))
  792. get_lowlevel_driver();
  793. spin_lock(&parportlist_lock);
  794. list_for_each_entry(port, &portlist, list) {
  795. if (port->number == number) {
  796. result = parport_get_port(port);
  797. break;
  798. }
  799. }
  800. spin_unlock(&parportlist_lock);
  801. return result;
  802. }
  803. EXPORT_SYMBOL(parport_find_number);
  804. /**
  805. * parport_find_base - find a parallel port by base address
  806. * @base: base I/O address
  807. *
  808. * This returns the parallel port with the specified base
  809. * address, or %NULL if there is none.
  810. *
  811. * There is an implicit parport_get_port() done already; to throw
  812. * away the reference to the port that parport_find_base()
  813. * gives you, use parport_put_port().
  814. */
  815. struct parport *parport_find_base(unsigned long base)
  816. {
  817. struct parport *port, *result = NULL;
  818. if (list_empty(&portlist))
  819. get_lowlevel_driver();
  820. spin_lock(&parportlist_lock);
  821. list_for_each_entry(port, &portlist, list) {
  822. if (port->base == base) {
  823. result = parport_get_port(port);
  824. break;
  825. }
  826. }
  827. spin_unlock(&parportlist_lock);
  828. return result;
  829. }
  830. EXPORT_SYMBOL(parport_find_base);
/**
 * parport_claim - claim access to a parallel port device
 * @dev: pointer to structure representing a device on the port
 *
 * This function will not block and so can be used from interrupt
 * context.  If parport_claim() succeeds in claiming access to
 * the port it returns zero and the port is available to use.  It
 * may fail (returning non-zero) if the port is in use by another
 * driver and that driver is not willing to relinquish control of
 * the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	/* Re-claiming by the current owner is a harmless no-op. */
	if (port->cad == dev) {
		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		/* An owner without a preempt callback cannot be displaced. */
		if (oldcad->preempt) {
			/* Non-zero from preempt() means "not now". */
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			pr_warn("%s: %s released port when preempted!\n",
				port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting. */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		/* Bit 0 of dev->waiting doubles as "already on the list". */
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);
  934. /**
  935. * parport_claim_or_block - claim access to a parallel port device
  936. * @dev: pointer to structure representing a device on the port
  937. *
  938. * This behaves like parport_claim(), but will block if necessary
  939. * to wait for the port to be free. A return value of 1
  940. * indicates that it slept; 0 means that it succeeded without
  941. * needing to sleep. A negative error code indicates failure.
  942. **/
  943. int parport_claim_or_block(struct pardevice *dev)
  944. {
  945. int r;
  946. /*
  947. * Signal to parport_claim() that we can wait even without a
  948. * wakeup function.
  949. */
  950. dev->waiting = 2;
  951. /* Try to claim the port. If this fails, we need to sleep. */
  952. r = parport_claim(dev);
  953. if (r == -EAGAIN) {
  954. #ifdef PARPORT_DEBUG_SHARING
  955. printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
  956. dev->name);
  957. #endif
  958. /*
  959. * FIXME!!! Use the proper locking for dev->waiting,
  960. * and make this use the "wait_event_interruptible()"
  961. * interfaces. The cli/sti that used to be here
  962. * did nothing.
  963. *
  964. * See also parport_release()
  965. */
  966. /*
  967. * If dev->waiting is clear now, an interrupt
  968. * gave us the port and we would deadlock if we slept.
  969. */
  970. if (dev->waiting) {
  971. wait_event_interruptible(dev->wait_q,
  972. !dev->waiting);
  973. if (signal_pending(current))
  974. return -EINTR;
  975. r = 1;
  976. } else {
  977. r = 0;
  978. #ifdef PARPORT_DEBUG_SHARING
  979. printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
  980. dev->name);
  981. #endif
  982. }
  983. #ifdef PARPORT_DEBUG_SHARING
  984. if (dev->port->physport->cad != dev)
  985. printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
  986. dev->name, dev->port->physport->cad ?
  987. dev->port->physport->cad->name : "nobody");
  988. #endif
  989. }
  990. dev->waiting = 0;
  991. return r;
  992. }
  993. EXPORT_SYMBOL(parport_claim_or_block);
/**
 * parport_release - give up access to a parallel port device
 * @dev: pointer to structure representing parallel port device
 *
 * This function cannot fail, but it should not be called without
 * the port claimed.  Similarly, if the port is already claimed
 * you should not try claiming it again.
 **/
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		pr_warn("%s: %s tried to release parport when not owner\n",
			port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	/* The port is ownerless from here until a waiter claims it. */
	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Claim on the sleeper's behalf, then wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Let the driver claim the port from its callback. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			pr_err("%s: don't know how to wake %s\n",
			       port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up. (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);
  1062. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  1063. {
  1064. struct parport *port = dev_id;
  1065. parport_generic_irq(port);
  1066. return IRQ_HANDLED;
  1067. }
  1068. EXPORT_SYMBOL(parport_irq_handler);
  1069. MODULE_LICENSE("GPL");