/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vme.h>

#include "vme_bridge.h"

/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

static void __exit vme_exit(void);
static int __init vme_init(void);

static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
        return container_of(dev, struct vme_dev, dev);
}

/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
        /* Get list to search */
        switch (resource->type) {
        case VME_MASTER:
                return list_entry(resource->entry, struct vme_master_resource,
                        list)->parent;
        case VME_SLAVE:
                return list_entry(resource->entry, struct vme_slave_resource,
                        list)->parent;
        case VME_DMA:
                return list_entry(resource->entry, struct vme_dma_resource,
                        list)->parent;
        case VME_LM:
                return list_entry(resource->entry, struct vme_lm_resource,
                        list)->parent;
        default:
                printk(KERN_ERR "Unknown resource type\n");
                return NULL;
        }
}

/*
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
        dma_addr_t *dma)
{
        struct vme_bridge *bridge;

        if (resource == NULL) {
                printk(KERN_ERR "No resource\n");
                return NULL;
        }

        bridge = find_bridge(resource);
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find bridge\n");
                return NULL;
        }

        if (bridge->parent == NULL) {
                printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
                return NULL;
        }

        if (bridge->alloc_consistent == NULL) {
                printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
                       bridge->name);
                return NULL;
        }

        return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);

/*
 * Free previously allocated contiguous block of memory.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
        void *vaddr, dma_addr_t dma)
{
        struct vme_bridge *bridge;

        if (resource == NULL) {
                printk(KERN_ERR "No resource\n");
                return;
        }

        bridge = find_bridge(resource);
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find bridge\n");
                return;
        }

        if (bridge->parent == NULL) {
                printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
                return;
        }

        if (bridge->free_consistent == NULL) {
                printk(KERN_ERR "free_consistent not supported by bridge %s\n",
                       bridge->name);
                return;
        }

        bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
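
/*
 * Example usage (an illustrative sketch, not part of the framework): a
 * slave device driver might pair these two calls as below. The resource
 * pointer and the 64 KB buffer size are hypothetical.
 *
 *        dma_addr_t dma;
 *        void *vaddr;
 *
 *        vaddr = vme_alloc_consistent(resource, 0x10000, &dma);
 *        if (vaddr == NULL)
 *                return -ENOMEM;
 *        ...
 *        vme_free_consistent(resource, 0x10000, vaddr, dma);
 */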

size_t vme_get_size(struct vme_resource *resource)
{
        int enabled, retval;
        unsigned long long base, size;
        dma_addr_t buf_base;
        u32 aspace, cycle, dwidth;

        switch (resource->type) {
        case VME_MASTER:
                retval = vme_master_get(resource, &enabled, &base, &size,
                        &aspace, &cycle, &dwidth);
                if (retval)
                        return 0;

                return size;
        case VME_SLAVE:
                retval = vme_slave_get(resource, &enabled, &base, &size,
                        &buf_base, &aspace, &cycle);
                if (retval)
                        return 0;

                return size;
        case VME_DMA:
                return 0;
        default:
                printk(KERN_ERR "Unknown resource type\n");
                return 0;
        }
}
EXPORT_SYMBOL(vme_get_size);

static int vme_check_window(u32 aspace, unsigned long long vme_base,
        unsigned long long size)
{
        int retval = 0;

        switch (aspace) {
        case VME_A16:
                if (((vme_base + size) > VME_A16_MAX) ||
                    (vme_base > VME_A16_MAX))
                        retval = -EFAULT;
                break;
        case VME_A24:
                if (((vme_base + size) > VME_A24_MAX) ||
                    (vme_base > VME_A24_MAX))
                        retval = -EFAULT;
                break;
        case VME_A32:
                if (((vme_base + size) > VME_A32_MAX) ||
                    (vme_base > VME_A32_MAX))
                        retval = -EFAULT;
                break;
        case VME_A64:
                /*
                 * Any value held in an unsigned long long can be used as the
                 * base
                 */
                break;
        case VME_CRCSR:
                if (((vme_base + size) > VME_CRCSR_MAX) ||
                    (vme_base > VME_CRCSR_MAX))
                        retval = -EFAULT;
                break;
        case VME_USER1:
        case VME_USER2:
        case VME_USER3:
        case VME_USER4:
                /* User Defined */
                break;
        default:
                printk(KERN_ERR "Invalid address space\n");
                retval = -EINVAL;
                break;
        }

        return retval;
}
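
/*
 * For example (hypothetical values, assuming VME_A16_MAX is the 64 KB
 * A16 limit): a window at vme_base 0xFF00 with size 0x200 fails the
 * check above because 0xFF00 + 0x200 = 0x10100 runs past the end of
 * A16 space, while the same base with size 0x100 passes.
 */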

/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
        u32 cycle)
{
        struct vme_bridge *bridge;
        struct list_head *slave_pos = NULL;
        struct vme_slave_resource *allocated_image = NULL;
        struct vme_slave_resource *slave_image = NULL;
        struct vme_resource *resource = NULL;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through slave resources */
        list_for_each(slave_pos, &bridge->slave_resources) {
                slave_image = list_entry(slave_pos,
                        struct vme_slave_resource, list);

                if (slave_image == NULL) {
                        printk(KERN_ERR "Registered NULL Slave resource\n");
                        continue;
                }

                /* Find an unlocked and compatible image */
                mutex_lock(&slave_image->mtx);
                if (((slave_image->address_attr & address) == address) &&
                    ((slave_image->cycle_attr & cycle) == cycle) &&
                    (slave_image->locked == 0)) {
                        slave_image->locked = 1;
                        mutex_unlock(&slave_image->mtx);
                        allocated_image = slave_image;
                        break;
                }
                mutex_unlock(&slave_image->mtx);
        }

        /* No free image */
        if (allocated_image == NULL)
                goto err_image;

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_WARNING "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_SLAVE;
        resource->entry = &allocated_image->list;

        return resource;

err_alloc:
        /* Unlock image */
        mutex_lock(&slave_image->mtx);
        slave_image->locked = 0;
        mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_slave_request);

int vme_slave_set(struct vme_resource *resource, int enabled,
        unsigned long long vme_base, unsigned long long size,
        dma_addr_t buf_base, u32 aspace, u32 cycle)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_slave_resource *image;
        int retval;

        if (resource->type != VME_SLAVE) {
                printk(KERN_ERR "Not a slave resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_slave_resource, list);

        if (bridge->slave_set == NULL) {
                printk(KERN_ERR "Function not supported\n");
                return -ENOSYS;
        }

        if (!(((image->address_attr & aspace) == aspace) &&
              ((image->cycle_attr & cycle) == cycle))) {
                printk(KERN_ERR "Invalid attributes\n");
                return -EINVAL;
        }

        retval = vme_check_window(aspace, vme_base, size);
        if (retval)
                return retval;

        return bridge->slave_set(image, enabled, vme_base, size, buf_base,
                aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);
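
/*
 * Example usage (an illustrative sketch): requesting an A24 slave
 * window and backing it with a coherent buffer. The attribute flags,
 * VME base address and size are hypothetical; error handling is
 * abbreviated.
 *
 *        struct vme_resource *res;
 *        dma_addr_t buf;
 *        void *vaddr;
 *
 *        res = vme_slave_request(vdev, VME_A24, VME_SCT);
 *        if (res == NULL)
 *                return -ENODEV;
 *        vaddr = vme_alloc_consistent(res, 0x10000, &buf);
 *        if (vaddr == NULL)
 *                goto err;
 *        if (vme_slave_set(res, 1, 0x100000, 0x10000, buf, VME_A24,
 *                        VME_SCT | VME_USER | VME_DATA))
 *                goto err;
 */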

int vme_slave_get(struct vme_resource *resource, int *enabled,
        unsigned long long *vme_base, unsigned long long *size,
        dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_slave_resource *image;

        if (resource->type != VME_SLAVE) {
                printk(KERN_ERR "Not a slave resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_slave_resource, list);

        if (bridge->slave_get == NULL) {
                printk(KERN_ERR "vme_slave_get not supported\n");
                return -EINVAL;
        }

        return bridge->slave_get(image, enabled, vme_base, size, buf_base,
                aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);

void vme_slave_free(struct vme_resource *resource)
{
        struct vme_slave_resource *slave_image;

        if (resource->type != VME_SLAVE) {
                printk(KERN_ERR "Not a slave resource\n");
                return;
        }

        slave_image = list_entry(resource->entry, struct vme_slave_resource,
                list);
        if (slave_image == NULL) {
                printk(KERN_ERR "Can't find slave resource\n");
                return;
        }

        /* Unlock image */
        mutex_lock(&slave_image->mtx);
        if (slave_image->locked == 0)
                printk(KERN_ERR "Image is already free\n");

        slave_image->locked = 0;
        mutex_unlock(&slave_image->mtx);

        /* Free up resource memory */
        kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);

/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
        u32 cycle, u32 dwidth)
{
        struct vme_bridge *bridge;
        struct list_head *master_pos = NULL;
        struct vme_master_resource *allocated_image = NULL;
        struct vme_master_resource *master_image = NULL;
        struct vme_resource *resource = NULL;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through master resources */
        list_for_each(master_pos, &bridge->master_resources) {
                master_image = list_entry(master_pos,
                        struct vme_master_resource, list);

                if (master_image == NULL) {
                        printk(KERN_WARNING "Registered NULL master resource\n");
                        continue;
                }

                /* Find an unlocked and compatible image */
                spin_lock(&master_image->lock);
                if (((master_image->address_attr & address) == address) &&
                    ((master_image->cycle_attr & cycle) == cycle) &&
                    ((master_image->width_attr & dwidth) == dwidth) &&
                    (master_image->locked == 0)) {
                        master_image->locked = 1;
                        spin_unlock(&master_image->lock);
                        allocated_image = master_image;
                        break;
                }
                spin_unlock(&master_image->lock);
        }

        /* Check to see if we found a resource */
        if (allocated_image == NULL) {
                printk(KERN_ERR "Can't find a suitable resource\n");
                goto err_image;
        }

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_ERR "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_MASTER;
        resource->entry = &allocated_image->list;

        return resource;

err_alloc:
        /* Unlock image */
        spin_lock(&master_image->lock);
        master_image->locked = 0;
        spin_unlock(&master_image->lock);
err_image:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_master_request);

int vme_master_set(struct vme_resource *resource, int enabled,
        unsigned long long vme_base, unsigned long long size, u32 aspace,
        u32 cycle, u32 dwidth)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_master_resource *image;
        int retval;

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);

        if (bridge->master_set == NULL) {
                printk(KERN_WARNING "vme_master_set not supported\n");
                return -EINVAL;
        }

        if (!(((image->address_attr & aspace) == aspace) &&
              ((image->cycle_attr & cycle) == cycle) &&
              ((image->width_attr & dwidth) == dwidth))) {
                printk(KERN_WARNING "Invalid attributes\n");
                return -EINVAL;
        }

        retval = vme_check_window(aspace, vme_base, size);
        if (retval)
                return retval;

        return bridge->master_set(image, enabled, vme_base, size, aspace,
                cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
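
/*
 * Example usage (an illustrative sketch): requesting and enabling a
 * 64 KB D16 master window onto A16 space; the window placement is
 * hypothetical.
 *
 *        struct vme_resource *res;
 *
 *        res = vme_master_request(vdev, VME_A16, VME_SCT, VME_D16);
 *        if (res == NULL)
 *                return -ENODEV;
 *        if (vme_master_set(res, 1, 0x0, 0x10000, VME_A16, VME_SCT,
 *                        VME_D16))
 *                return -EIO;
 */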

int vme_master_get(struct vme_resource *resource, int *enabled,
        unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
        u32 *cycle, u32 *dwidth)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_master_resource *image;

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);

        if (bridge->master_get == NULL) {
                printk(KERN_WARNING "%s not supported\n", __func__);
                return -EINVAL;
        }

        return bridge->master_get(image, enabled, vme_base, size, aspace,
                cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);

/*
 * Read data out of VME space into a buffer.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
        loff_t offset)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_master_resource *image;
        size_t length;

        if (bridge->master_read == NULL) {
                printk(KERN_WARNING "Reading from resource not supported\n");
                return -EINVAL;
        }

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);

        length = vme_get_size(resource);

        if (offset > length) {
                printk(KERN_WARNING "Invalid Offset\n");
                return -EFAULT;
        }

        if ((offset + count) > length)
                count = length - offset;

        return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);

/*
 * Write data out to VME space from a buffer.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
        size_t count, loff_t offset)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_master_resource *image;
        size_t length;

        if (bridge->master_write == NULL) {
                printk(KERN_WARNING "Writing to resource not supported\n");
                return -EINVAL;
        }

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);

        length = vme_get_size(resource);

        if (offset > length) {
                printk(KERN_WARNING "Invalid Offset\n");
                return -EFAULT;
        }

        if ((offset + count) > length)
                count = length - offset;

        return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
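
/*
 * Example usage (an illustrative sketch): a read-modify-write of a
 * 16-bit register at a hypothetical offset within a configured master
 * window, done as two separate cycles.
 *
 *        u16 val;
 *        ssize_t ret;
 *
 *        ret = vme_master_read(res, &val, sizeof(val), 0x40);
 *        if (ret < 0)
 *                return ret;
 *        val |= 0x1;
 *        ret = vme_master_write(res, &val, sizeof(val), 0x40);
 */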

/*
 * Perform RMW cycle to provided location.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
        unsigned int compare, unsigned int swap, loff_t offset)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_master_resource *image;

        if (bridge->master_rmw == NULL) {
                printk(KERN_WARNING "RMW cycles not supported\n");
                return -EINVAL;
        }

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);

        return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
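
/*
 * Example usage (an illustrative sketch): atomically setting bit 0 of
 * a shared word at offset 0 of the window; the mask, compare and swap
 * values are hypothetical and their exact semantics are defined by the
 * underlying bridge.
 *
 *        unsigned int old;
 *
 *        old = vme_master_rmw(res, 0x1, 0x0, 0x1, 0x0);
 */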

int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
        struct vme_master_resource *image;
        phys_addr_t phys_addr;
        unsigned long vma_size;

        if (resource->type != VME_MASTER) {
                pr_err("Not a master resource\n");
                return -EINVAL;
        }

        image = list_entry(resource->entry, struct vme_master_resource, list);
        phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
        vma_size = vma->vm_end - vma->vm_start;

        if (phys_addr + vma_size > image->bus_resource.end + 1) {
                pr_err("Map size cannot exceed the window size\n");
                return -EFAULT;
        }

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);

void vme_master_free(struct vme_resource *resource)
{
        struct vme_master_resource *master_image;

        if (resource->type != VME_MASTER) {
                printk(KERN_ERR "Not a master resource\n");
                return;
        }

        master_image = list_entry(resource->entry, struct vme_master_resource,
                list);
        if (master_image == NULL) {
                printk(KERN_ERR "Can't find master resource\n");
                return;
        }

        /* Unlock image */
        spin_lock(&master_image->lock);
        if (master_image->locked == 0)
                printk(KERN_ERR "Image is already free\n");

        master_image->locked = 0;
        spin_unlock(&master_image->lock);

        /* Free up resource memory */
        kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);

/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
        struct vme_bridge *bridge;
        struct list_head *dma_pos = NULL;
        struct vme_dma_resource *allocated_ctrlr = NULL;
        struct vme_dma_resource *dma_ctrlr = NULL;
        struct vme_resource *resource = NULL;

        /* XXX Not checking resource attributes */
        printk(KERN_ERR "No VME resource Attribute tests done\n");

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through DMA resources */
        list_for_each(dma_pos, &bridge->dma_resources) {
                dma_ctrlr = list_entry(dma_pos,
                        struct vme_dma_resource, list);

                if (dma_ctrlr == NULL) {
                        printk(KERN_ERR "Registered NULL DMA resource\n");
                        continue;
                }

                /* Find an unlocked and compatible controller */
                mutex_lock(&dma_ctrlr->mtx);
                if (((dma_ctrlr->route_attr & route) == route) &&
                    (dma_ctrlr->locked == 0)) {
                        dma_ctrlr->locked = 1;
                        mutex_unlock(&dma_ctrlr->mtx);
                        allocated_ctrlr = dma_ctrlr;
                        break;
                }
                mutex_unlock(&dma_ctrlr->mtx);
        }

        /* Check to see if we found a resource */
        if (allocated_ctrlr == NULL)
                goto err_ctrlr;

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_WARNING "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_DMA;
        resource->entry = &allocated_ctrlr->list;

        return resource;

err_alloc:
        /* Unlock controller */
        mutex_lock(&dma_ctrlr->mtx);
        dma_ctrlr->locked = 0;
        mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_dma_request);

/*
 * Start new list
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
        struct vme_dma_resource *ctrlr;
        struct vme_dma_list *dma_list;

        if (resource->type != VME_DMA) {
                printk(KERN_ERR "Not a DMA resource\n");
                return NULL;
        }

        ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

        dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
        if (dma_list == NULL) {
                printk(KERN_ERR "Unable to allocate memory for new dma list\n");
                return NULL;
        }
        INIT_LIST_HEAD(&dma_list->entries);
        dma_list->parent = ctrlr;
        mutex_init(&dma_list->mtx);

        return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);

/*
 * Create "Pattern" type attributes
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
        struct vme_dma_attr *attributes;
        struct vme_dma_pattern *pattern_attr;

        attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
        if (attributes == NULL) {
                printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
                goto err_attr;
        }

        pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
        if (pattern_attr == NULL) {
                printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
                goto err_pat;
        }

        attributes->type = VME_DMA_PATTERN;
        attributes->private = (void *)pattern_attr;

        pattern_attr->pattern = pattern;
        pattern_attr->type = type;

        return attributes;

err_pat:
        kfree(attributes);
err_attr:
        return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);

/*
 * Create "PCI" type attributes
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
        struct vme_dma_attr *attributes;
        struct vme_dma_pci *pci_attr;

        /* XXX Run some sanity checks here */
        attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
        if (attributes == NULL) {
                printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
                goto err_attr;
        }

        pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
        if (pci_attr == NULL) {
                printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
                goto err_pci;
        }

        attributes->type = VME_DMA_PCI;
        attributes->private = (void *)pci_attr;

        pci_attr->address = address;

        return attributes;

err_pci:
        kfree(attributes);
err_attr:
        return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);

/*
 * Create "VME" type attributes
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
        u32 aspace, u32 cycle, u32 dwidth)
{
        struct vme_dma_attr *attributes;
        struct vme_dma_vme *vme_attr;

        attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
        if (attributes == NULL) {
                printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
                goto err_attr;
        }

        vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
        if (vme_attr == NULL) {
                printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
                goto err_vme;
        }

        attributes->type = VME_DMA_VME;
        attributes->private = (void *)vme_attr;

        vme_attr->address = address;
        vme_attr->aspace = aspace;
        vme_attr->cycle = cycle;
        vme_attr->dwidth = dwidth;

        return attributes;

err_vme:
        kfree(attributes);
err_attr:
        return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);

/*
 * Free attribute
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
        kfree(attributes->private);
        kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);

int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
        struct vme_dma_attr *dest, size_t count)
{
        struct vme_bridge *bridge = list->parent->parent;
        int retval;

        if (bridge->dma_list_add == NULL) {
                printk(KERN_WARNING "Link List DMA generation not supported\n");
                return -EINVAL;
        }

        if (!mutex_trylock(&list->mtx)) {
                printk(KERN_ERR "Link List already submitted\n");
                return -EINVAL;
        }

        retval = bridge->dma_list_add(list, src, dest, count);

        mutex_unlock(&list->mtx);

        return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);

int vme_dma_list_exec(struct vme_dma_list *list)
{
        struct vme_bridge *bridge = list->parent->parent;
        int retval;

        if (bridge->dma_list_exec == NULL) {
                printk(KERN_ERR "Link List DMA execution not supported\n");
                return -EINVAL;
        }

        mutex_lock(&list->mtx);

        retval = bridge->dma_list_exec(list);

        mutex_unlock(&list->mtx);

        return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);

int vme_dma_list_free(struct vme_dma_list *list)
{
        struct vme_bridge *bridge = list->parent->parent;
        int retval;

        if (bridge->dma_list_empty == NULL) {
                printk(KERN_WARNING "Emptying of Link Lists not supported\n");
                return -EINVAL;
        }

        if (!mutex_trylock(&list->mtx)) {
                printk(KERN_ERR "Link List in use\n");
                return -EINVAL;
        }

        /*
         * Empty out all of the entries from the dma list. We need to go to the
         * low level driver as dma entries are driver specific.
         */
        retval = bridge->dma_list_empty(list);
        if (retval) {
                printk(KERN_ERR "Unable to empty link-list entries\n");
                mutex_unlock(&list->mtx);
                return retval;
        }
        mutex_unlock(&list->mtx);
        kfree(list);

        return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
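
/*
 * Example usage (an illustrative sketch): one transfer from a PCI
 * buffer to VME A32 space via the list API. Addresses, sizes and
 * attributes are hypothetical; error handling is omitted for brevity.
 *
 *        struct vme_dma_list *list;
 *        struct vme_dma_attr *src, *dest;
 *
 *        list = vme_new_dma_list(dma_res);
 *        src = vme_dma_pci_attribute(buf_dma);
 *        dest = vme_dma_vme_attribute(0x10000000, VME_A32, VME_SCT,
 *                        VME_D32);
 *        vme_dma_list_add(list, src, dest, 0x1000);
 *        vme_dma_list_exec(list);
 *        vme_dma_free_attribute(src);
 *        vme_dma_free_attribute(dest);
 *        vme_dma_list_free(list);
 */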

int vme_dma_free(struct vme_resource *resource)
{
        struct vme_dma_resource *ctrlr;

        if (resource->type != VME_DMA) {
                printk(KERN_ERR "Not a DMA resource\n");
                return -EINVAL;
        }

        ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

        if (!mutex_trylock(&ctrlr->mtx)) {
                printk(KERN_ERR "Resource busy, can't free\n");
                return -EBUSY;
        }

        if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
                printk(KERN_WARNING "Resource still processing transfers\n");
                mutex_unlock(&ctrlr->mtx);
                return -EBUSY;
        }

        ctrlr->locked = 0;

        mutex_unlock(&ctrlr->mtx);

        kfree(resource);

        return 0;
}
EXPORT_SYMBOL(vme_dma_free);

void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
        void (*call)(int, int, void *);
        void *priv_data;

        call = bridge->irq[level - 1].callback[statid].func;
        priv_data = bridge->irq[level - 1].callback[statid].priv_data;

        if (call != NULL)
                call(level, statid, priv_data);
        else
                printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
                       level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);

int vme_irq_request(struct vme_dev *vdev, int level, int statid,
        void (*callback)(int, int, void *),
        void *priv_data)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                return -EINVAL;
        }

        if ((level < 1) || (level > 7)) {
                printk(KERN_ERR "Invalid interrupt level\n");
                return -EINVAL;
        }

        if (bridge->irq_set == NULL) {
                printk(KERN_ERR "Configuring interrupts not supported\n");
                return -EINVAL;
        }

        mutex_lock(&bridge->irq_mtx);

        if (bridge->irq[level - 1].callback[statid].func) {
                mutex_unlock(&bridge->irq_mtx);
                printk(KERN_WARNING "VME Interrupt already taken\n");
                return -EBUSY;
        }

        bridge->irq[level - 1].count++;
        bridge->irq[level - 1].callback[statid].priv_data = priv_data;
        bridge->irq[level - 1].callback[statid].func = callback;

        /* Enable IRQ level */
        bridge->irq_set(bridge, level, 1, 1);

        mutex_unlock(&bridge->irq_mtx);

        return 0;
}
EXPORT_SYMBOL(vme_irq_request);
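
/*
 * Example usage (an illustrative sketch): registering a handler for
 * interrupt level 3, status/ID 0x20; the level, vector and callback
 * are hypothetical.
 *
 *        static void my_isr(int level, int statid, void *priv)
 *        {
 *                ...
 *        }
 *
 *        if (vme_irq_request(vdev, 3, 0x20, my_isr, my_priv))
 *                return -EBUSY;
 *        ...
 *        vme_irq_free(vdev, 3, 0x20);
 */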

void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                return;
        }

        if ((level < 1) || (level > 7)) {
                printk(KERN_ERR "Invalid interrupt level\n");
                return;
        }

        if (bridge->irq_set == NULL) {
                printk(KERN_ERR "Configuring interrupts not supported\n");
                return;
        }

        mutex_lock(&bridge->irq_mtx);

        bridge->irq[level - 1].count--;

        /* Disable IRQ level if no more interrupts attached at this level */
        if (bridge->irq[level - 1].count == 0)
                bridge->irq_set(bridge, level, 0, 1);

        bridge->irq[level - 1].callback[statid].func = NULL;
        bridge->irq[level - 1].callback[statid].priv_data = NULL;

        mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);

int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                return -EINVAL;
        }

        if ((level < 1) || (level > 7)) {
                printk(KERN_WARNING "Invalid interrupt level\n");
                return -EINVAL;
        }

        if (bridge->irq_generate == NULL) {
                printk(KERN_WARNING "Interrupt generation not supported\n");
                return -EINVAL;
        }

        return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);

/*
 * Request the location monitor, return resource or NULL
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
        struct vme_bridge *bridge;
        struct list_head *lm_pos = NULL;
        struct vme_lm_resource *allocated_lm = NULL;
        struct vme_lm_resource *lm = NULL;
        struct vme_resource *resource = NULL;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                goto err_bus;
        }

        /* Loop through location monitor resources */
        list_for_each(lm_pos, &bridge->lm_resources) {
                lm = list_entry(lm_pos,
                        struct vme_lm_resource, list);

                if (lm == NULL) {
                        printk(KERN_ERR "Registered NULL Location Monitor resource\n");
                        continue;
                }

                /* Find an unlocked controller */
                mutex_lock(&lm->mtx);
                if (lm->locked == 0) {
                        lm->locked = 1;
                        mutex_unlock(&lm->mtx);
                        allocated_lm = lm;
                        break;
                }
                mutex_unlock(&lm->mtx);
        }

        /* Check to see if we found a resource */
        if (allocated_lm == NULL)
                goto err_lm;

        resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
        if (resource == NULL) {
                printk(KERN_ERR "Unable to allocate resource structure\n");
                goto err_alloc;
        }
        resource->type = VME_LM;
        resource->entry = &allocated_lm->list;

        return resource;

err_alloc:
        /* Unlock location monitor */
        mutex_lock(&lm->mtx);
        lm->locked = 0;
        mutex_unlock(&lm->mtx);
err_lm:
err_bus:
        return NULL;
}
EXPORT_SYMBOL(vme_lm_request);

int vme_lm_count(struct vme_resource *resource)
{
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);

int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
        u32 aspace, u32 cycle)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        if (bridge->lm_set == NULL) {
                printk(KERN_ERR "vme_lm_set not supported\n");
                return -EINVAL;
        }

        return bridge->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);

int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
        u32 *aspace, u32 *cycle)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        if (bridge->lm_get == NULL) {
                printk(KERN_ERR "vme_lm_get not supported\n");
                return -EINVAL;
        }

        return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);

int vme_lm_attach(struct vme_resource *resource, int monitor,
        void (*callback)(int))
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        if (bridge->lm_attach == NULL) {
                printk(KERN_ERR "vme_lm_attach not supported\n");
                return -EINVAL;
        }

        return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);
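
/*
 * Example usage (an illustrative sketch): attaching a callback to the
 * first location monitor; the A24 base address and callback are
 * hypothetical.
 *
 *        struct vme_resource *lm_res;
 *
 *        lm_res = vme_lm_request(vdev);
 *        if (lm_res == NULL)
 *                return -ENODEV;
 *        if (vme_lm_set(lm_res, 0x60000, VME_A24, VME_SCT))
 *                goto err;
 *        if (vme_lm_attach(lm_res, 0, lm_callback))
 *                goto err;
 */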

int vme_lm_detach(struct vme_resource *resource, int monitor)
{
        struct vme_bridge *bridge = find_bridge(resource);
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return -EINVAL;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        if (bridge->lm_detach == NULL) {
                printk(KERN_ERR "vme_lm_detach not supported\n");
                return -EINVAL;
        }

        return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);

void vme_lm_free(struct vme_resource *resource)
{
        struct vme_lm_resource *lm;

        if (resource->type != VME_LM) {
                printk(KERN_ERR "Not a Location Monitor resource\n");
                return;
        }

        lm = list_entry(resource->entry, struct vme_lm_resource, list);

        mutex_lock(&lm->mtx);

        /* XXX
         * Check to see that there aren't any callbacks still attached, if
         * there are we should probably be detaching them!
         */

        lm->locked = 0;

        mutex_unlock(&lm->mtx);

        kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);

int vme_slot_num(struct vme_dev *vdev)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                printk(KERN_ERR "Can't find VME bus\n");
                return -EINVAL;
        }

        if (bridge->slot_get == NULL) {
                printk(KERN_WARNING "vme_slot_num not supported\n");
                return -EINVAL;
        }

        return bridge->slot_get(bridge);
}
EXPORT_SYMBOL(vme_slot_num);

int vme_bus_num(struct vme_dev *vdev)
{
        struct vme_bridge *bridge;

        bridge = vdev->bridge;
        if (bridge == NULL) {
                pr_err("Can't find VME bus\n");
                return -EINVAL;
        }

        return bridge->num;
}
EXPORT_SYMBOL(vme_bus_num);

/* - Bridge Registration --------------------------------------------------- */

static void vme_dev_release(struct device *dev)
{
        kfree(dev_to_vme_dev(dev));
}

int vme_register_bridge(struct vme_bridge *bridge)
{
        int i;
        int ret = -1;

        mutex_lock(&vme_buses_lock);
        for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
                if ((vme_bus_numbers & (1 << i)) == 0) {
                        vme_bus_numbers |= (1 << i);
                        bridge->num = i;
                        INIT_LIST_HEAD(&bridge->devices);
                        list_add_tail(&bridge->bus_list, &vme_bus_list);
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&vme_buses_lock);

        return ret;
}
EXPORT_SYMBOL(vme_register_bridge);

void vme_unregister_bridge(struct vme_bridge *bridge)
{
        struct vme_dev *vdev;
        struct vme_dev *tmp;

        mutex_lock(&vme_buses_lock);
        vme_bus_numbers &= ~(1 << bridge->num);
        list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
                list_del(&vdev->drv_list);
                list_del(&vdev->bridge_list);
                device_unregister(&vdev->dev);
        }
        list_del(&bridge->bus_list);
        mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);

/* - Driver Registration --------------------------------------------------- */

static int __vme_register_driver_bus(struct vme_driver *drv,
        struct vme_bridge *bridge, unsigned int ndevs)
{
        int err;
        unsigned int i;
        struct vme_dev *vdev;
        struct vme_dev *tmp;

        for (i = 0; i < ndevs; i++) {
                vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
                if (!vdev) {
                        err = -ENOMEM;
                        goto err_devalloc;
                }
                vdev->num = i;
                vdev->bridge = bridge;
                vdev->dev.platform_data = drv;
                vdev->dev.release = vme_dev_release;
                vdev->dev.parent = bridge->parent;
                vdev->dev.bus = &vme_bus_type;
                dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
                        vdev->num);

                err = device_register(&vdev->dev);
                if (err)
                        goto err_reg;

                if (vdev->dev.platform_data) {
                        list_add_tail(&vdev->drv_list, &drv->devices);
                        list_add_tail(&vdev->bridge_list, &bridge->devices);
                } else
                        device_unregister(&vdev->dev);
        }
        return 0;

err_reg:
        /* put_device() drops the last reference; vme_dev_release() frees vdev */
        put_device(&vdev->dev);
err_devalloc:
        list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
                list_del(&vdev->drv_list);
                list_del(&vdev->bridge_list);
                device_unregister(&vdev->dev);
        }
        return err;
}

static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
        struct vme_bridge *bridge;
        int err = 0;

        mutex_lock(&vme_buses_lock);
        list_for_each_entry(bridge, &vme_bus_list, bus_list) {
                /*
                 * This cannot cause trouble as we already have vme_buses_lock
                 * and if the bridge is removed, it will have to go through
                 * vme_unregister_bridge() to do it (which calls remove() on
                 * the bridge which in turn tries to acquire vme_buses_lock and
                 * will have to wait).
                 */
                err = __vme_register_driver_bus(drv, bridge, ndevs);
                if (err)
                        break;
        }
        mutex_unlock(&vme_buses_lock);

        return err;
}

int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
        int err;

        drv->driver.name = drv->name;
        drv->driver.bus = &vme_bus_type;
        INIT_LIST_HEAD(&drv->devices);

        err = driver_register(&drv->driver);
        if (err)
                return err;

        err = __vme_register_driver(drv, ndevs);
        if (err)
                driver_unregister(&drv->driver);

        return err;
}
EXPORT_SYMBOL(vme_register_driver);
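
/*
 * Example usage (an illustrative sketch): a minimal driver registering
 * one device per bridge; the driver name and callbacks are
 * hypothetical.
 *
 *        static int my_match(struct vme_dev *vdev)
 *        {
 *                return 1;
 *        }
 *
 *        static int my_probe(struct vme_dev *vdev)
 *        {
 *                return 0;
 *        }
 *
 *        static struct vme_driver my_driver = {
 *                .name = "my_vme_driver",
 *                .match = my_match,
 *                .probe = my_probe,
 *        };
 *
 *        err = vme_register_driver(&my_driver, 1);
 */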

void vme_unregister_driver(struct vme_driver *drv)
{
        struct vme_dev *dev, *dev_tmp;

        mutex_lock(&vme_buses_lock);
        list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
                list_del(&dev->drv_list);
                list_del(&dev->bridge_list);
                device_unregister(&dev->dev);
        }
        mutex_unlock(&vme_buses_lock);

        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);

/* - Bus Registration ------------------------------------------------------ */

static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
        struct vme_driver *vme_drv;

        vme_drv = container_of(drv, struct vme_driver, driver);

        if (dev->platform_data == vme_drv) {
                struct vme_dev *vdev = dev_to_vme_dev(dev);

                if (vme_drv->match && vme_drv->match(vdev))
                        return 1;

                dev->platform_data = NULL;
        }
        return 0;
}

static int vme_bus_probe(struct device *dev)
{
        int retval = -ENODEV;
        struct vme_driver *driver;
        struct vme_dev *vdev = dev_to_vme_dev(dev);

        driver = dev->platform_data;

        if (driver->probe != NULL)
                retval = driver->probe(vdev);

        return retval;
}

static int vme_bus_remove(struct device *dev)
{
        int retval = -ENODEV;
        struct vme_driver *driver;
        struct vme_dev *vdev = dev_to_vme_dev(dev);

        driver = dev->platform_data;

        if (driver->remove != NULL)
                retval = driver->remove(vdev);

        return retval;
}

struct bus_type vme_bus_type = {
        .name = "vme",
        .match = vme_bus_match,
        .probe = vme_bus_probe,
        .remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);

static int __init vme_init(void)
{
        return bus_register(&vme_bus_type);
}

static void __exit vme_exit(void)
{
        bus_unregister(&vme_bus_type);
}

subsys_initcall(vme_init);
module_exit(vme_exit);