vme.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644
  1. /*
  2. * VME Bridge Framework
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/highmem.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/device.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/mutex.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/slab.h>
  32. #include <linux/vme.h>
  33. #include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;	/* bitmask of bus numbers in use */
static LIST_HEAD(vme_bus_list);		/* registered bridges */
static DEFINE_MUTEX(vme_buses_lock);	/* guards both of the above */

/* Forward declarations; definitions live later in this file. */
static void __exit vme_exit(void);
static int __init vme_init(void);
/* Recover the struct vme_dev that embeds the given generic struct device. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
  44. /*
  45. * Find the bridge that the resource is associated with.
  46. */
  47. static struct vme_bridge *find_bridge(struct vme_resource *resource)
  48. {
  49. /* Get list to search */
  50. switch (resource->type) {
  51. case VME_MASTER:
  52. return list_entry(resource->entry, struct vme_master_resource,
  53. list)->parent;
  54. break;
  55. case VME_SLAVE:
  56. return list_entry(resource->entry, struct vme_slave_resource,
  57. list)->parent;
  58. break;
  59. case VME_DMA:
  60. return list_entry(resource->entry, struct vme_dma_resource,
  61. list)->parent;
  62. break;
  63. case VME_LM:
  64. return list_entry(resource->entry, struct vme_lm_resource,
  65. list)->parent;
  66. break;
  67. default:
  68. printk(KERN_ERR "Unknown resource type\n");
  69. return NULL;
  70. break;
  71. }
  72. }
  73. /*
  74. * Allocate a contiguous block of memory for use by the driver. This is used to
  75. * create the buffers for the slave windows.
  76. */
  77. void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  78. dma_addr_t *dma)
  79. {
  80. struct vme_bridge *bridge;
  81. if (resource == NULL) {
  82. printk(KERN_ERR "No resource\n");
  83. return NULL;
  84. }
  85. bridge = find_bridge(resource);
  86. if (bridge == NULL) {
  87. printk(KERN_ERR "Can't find bridge\n");
  88. return NULL;
  89. }
  90. if (bridge->parent == NULL) {
  91. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  92. return NULL;
  93. }
  94. if (bridge->alloc_consistent == NULL) {
  95. printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
  96. bridge->name);
  97. return NULL;
  98. }
  99. return bridge->alloc_consistent(bridge->parent, size, dma);
  100. }
  101. EXPORT_SYMBOL(vme_alloc_consistent);
  102. /*
  103. * Free previously allocated contiguous block of memory.
  104. */
  105. void vme_free_consistent(struct vme_resource *resource, size_t size,
  106. void *vaddr, dma_addr_t dma)
  107. {
  108. struct vme_bridge *bridge;
  109. if (resource == NULL) {
  110. printk(KERN_ERR "No resource\n");
  111. return;
  112. }
  113. bridge = find_bridge(resource);
  114. if (bridge == NULL) {
  115. printk(KERN_ERR "Can't find bridge\n");
  116. return;
  117. }
  118. if (bridge->parent == NULL) {
  119. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  120. return;
  121. }
  122. if (bridge->free_consistent == NULL) {
  123. printk(KERN_ERR "free_consistent not supported by bridge %s\n",
  124. bridge->name);
  125. return;
  126. }
  127. bridge->free_consistent(bridge->parent, size, vaddr, dma);
  128. }
  129. EXPORT_SYMBOL(vme_free_consistent);
  130. size_t vme_get_size(struct vme_resource *resource)
  131. {
  132. int enabled, retval;
  133. unsigned long long base, size;
  134. dma_addr_t buf_base;
  135. u32 aspace, cycle, dwidth;
  136. switch (resource->type) {
  137. case VME_MASTER:
  138. retval = vme_master_get(resource, &enabled, &base, &size,
  139. &aspace, &cycle, &dwidth);
  140. return size;
  141. break;
  142. case VME_SLAVE:
  143. retval = vme_slave_get(resource, &enabled, &base, &size,
  144. &buf_base, &aspace, &cycle);
  145. return size;
  146. break;
  147. case VME_DMA:
  148. return 0;
  149. break;
  150. default:
  151. printk(KERN_ERR "Unknown resource type\n");
  152. return 0;
  153. break;
  154. }
  155. }
  156. EXPORT_SYMBOL(vme_get_size);
  157. int vme_check_window(u32 aspace, unsigned long long vme_base,
  158. unsigned long long size)
  159. {
  160. int retval = 0;
  161. switch (aspace) {
  162. case VME_A16:
  163. if (((vme_base + size) > VME_A16_MAX) ||
  164. (vme_base > VME_A16_MAX))
  165. retval = -EFAULT;
  166. break;
  167. case VME_A24:
  168. if (((vme_base + size) > VME_A24_MAX) ||
  169. (vme_base > VME_A24_MAX))
  170. retval = -EFAULT;
  171. break;
  172. case VME_A32:
  173. if (((vme_base + size) > VME_A32_MAX) ||
  174. (vme_base > VME_A32_MAX))
  175. retval = -EFAULT;
  176. break;
  177. case VME_A64:
  178. if ((size != 0) && (vme_base > U64_MAX + 1 - size))
  179. retval = -EFAULT;
  180. break;
  181. case VME_CRCSR:
  182. if (((vme_base + size) > VME_CRCSR_MAX) ||
  183. (vme_base > VME_CRCSR_MAX))
  184. retval = -EFAULT;
  185. break;
  186. case VME_USER1:
  187. case VME_USER2:
  188. case VME_USER3:
  189. case VME_USER4:
  190. /* User Defined */
  191. break;
  192. default:
  193. printk(KERN_ERR "Invalid address space\n");
  194. retval = -EINVAL;
  195. break;
  196. }
  197. return retval;
  198. }
  199. EXPORT_SYMBOL(vme_check_window);
/*
 * Translate a VME address modifier (AM) code into the framework's
 * address-space flag (VME_A16/A24/A32/A64).  AM codes not listed here
 * map to 0 (unrecognised).
 */
static u32 vme_get_aspace(int am)
{
	switch (am) {
	/* A16 AM codes */
	case 0x29:
	case 0x2D:
		return VME_A16;
	/* A24 AM codes */
	case 0x38:
	case 0x39:
	case 0x3A:
	case 0x3B:
	case 0x3C:
	case 0x3D:
	case 0x3E:
	case 0x3F:
		return VME_A24;
	/* A32 AM codes */
	case 0x8:
	case 0x9:
	case 0xA:
	case 0xB:
	case 0xC:
	case 0xD:
	case 0xE:
	case 0xF:
		return VME_A32;
	/* A64 AM codes (note: 0x2 intentionally absent) */
	case 0x0:
	case 0x1:
	case 0x3:
		return VME_A64;
	}

	return 0;
}
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's slave resources under each image's mutex and claims
 * the first unlocked image whose address and cycle attribute masks cover
 * the requested @address and @cycle.  The returned resource is released
 * with vme_slave_free().  Returns NULL if no suitable image is free or
 * on allocation failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {
			/* Claim the image before dropping its mutex. */
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
  289. int vme_slave_set(struct vme_resource *resource, int enabled,
  290. unsigned long long vme_base, unsigned long long size,
  291. dma_addr_t buf_base, u32 aspace, u32 cycle)
  292. {
  293. struct vme_bridge *bridge = find_bridge(resource);
  294. struct vme_slave_resource *image;
  295. int retval;
  296. if (resource->type != VME_SLAVE) {
  297. printk(KERN_ERR "Not a slave resource\n");
  298. return -EINVAL;
  299. }
  300. image = list_entry(resource->entry, struct vme_slave_resource, list);
  301. if (bridge->slave_set == NULL) {
  302. printk(KERN_ERR "Function not supported\n");
  303. return -ENOSYS;
  304. }
  305. if (!(((image->address_attr & aspace) == aspace) &&
  306. ((image->cycle_attr & cycle) == cycle))) {
  307. printk(KERN_ERR "Invalid attributes\n");
  308. return -EINVAL;
  309. }
  310. retval = vme_check_window(aspace, vme_base, size);
  311. if (retval)
  312. return retval;
  313. return bridge->slave_set(image, enabled, vme_base, size, buf_base,
  314. aspace, cycle);
  315. }
  316. EXPORT_SYMBOL(vme_slave_set);
  317. int vme_slave_get(struct vme_resource *resource, int *enabled,
  318. unsigned long long *vme_base, unsigned long long *size,
  319. dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
  320. {
  321. struct vme_bridge *bridge = find_bridge(resource);
  322. struct vme_slave_resource *image;
  323. if (resource->type != VME_SLAVE) {
  324. printk(KERN_ERR "Not a slave resource\n");
  325. return -EINVAL;
  326. }
  327. image = list_entry(resource->entry, struct vme_slave_resource, list);
  328. if (bridge->slave_get == NULL) {
  329. printk(KERN_ERR "vme_slave_get not supported\n");
  330. return -EINVAL;
  331. }
  332. return bridge->slave_get(image, enabled, vme_base, size, buf_base,
  333. aspace, cycle);
  334. }
  335. EXPORT_SYMBOL(vme_slave_get);
  336. void vme_slave_free(struct vme_resource *resource)
  337. {
  338. struct vme_slave_resource *slave_image;
  339. if (resource->type != VME_SLAVE) {
  340. printk(KERN_ERR "Not a slave resource\n");
  341. return;
  342. }
  343. slave_image = list_entry(resource->entry, struct vme_slave_resource,
  344. list);
  345. if (slave_image == NULL) {
  346. printk(KERN_ERR "Can't find slave resource\n");
  347. return;
  348. }
  349. /* Unlock image */
  350. mutex_lock(&slave_image->mtx);
  351. if (slave_image->locked == 0)
  352. printk(KERN_ERR "Image is already free\n");
  353. slave_image->locked = 0;
  354. mutex_unlock(&slave_image->mtx);
  355. /* Free up resource memory */
  356. kfree(resource);
  357. }
  358. EXPORT_SYMBOL(vme_slave_free);
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's master resources under each image's spinlock and
 * claims the first unlocked image whose address, cycle and data-width
 * attribute masks cover the requested values.  The returned resource is
 * released with vme_master_free().  Returns NULL if no suitable image
 * is free or on allocation failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {
			/* Claim the image before dropping its lock. */
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
  420. int vme_master_set(struct vme_resource *resource, int enabled,
  421. unsigned long long vme_base, unsigned long long size, u32 aspace,
  422. u32 cycle, u32 dwidth)
  423. {
  424. struct vme_bridge *bridge = find_bridge(resource);
  425. struct vme_master_resource *image;
  426. int retval;
  427. if (resource->type != VME_MASTER) {
  428. printk(KERN_ERR "Not a master resource\n");
  429. return -EINVAL;
  430. }
  431. image = list_entry(resource->entry, struct vme_master_resource, list);
  432. if (bridge->master_set == NULL) {
  433. printk(KERN_WARNING "vme_master_set not supported\n");
  434. return -EINVAL;
  435. }
  436. if (!(((image->address_attr & aspace) == aspace) &&
  437. ((image->cycle_attr & cycle) == cycle) &&
  438. ((image->width_attr & dwidth) == dwidth))) {
  439. printk(KERN_WARNING "Invalid attributes\n");
  440. return -EINVAL;
  441. }
  442. retval = vme_check_window(aspace, vme_base, size);
  443. if (retval)
  444. return retval;
  445. return bridge->master_set(image, enabled, vme_base, size, aspace,
  446. cycle, dwidth);
  447. }
  448. EXPORT_SYMBOL(vme_master_set);
  449. int vme_master_get(struct vme_resource *resource, int *enabled,
  450. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  451. u32 *cycle, u32 *dwidth)
  452. {
  453. struct vme_bridge *bridge = find_bridge(resource);
  454. struct vme_master_resource *image;
  455. if (resource->type != VME_MASTER) {
  456. printk(KERN_ERR "Not a master resource\n");
  457. return -EINVAL;
  458. }
  459. image = list_entry(resource->entry, struct vme_master_resource, list);
  460. if (bridge->master_get == NULL) {
  461. printk(KERN_WARNING "%s not supported\n", __func__);
  462. return -EINVAL;
  463. }
  464. return bridge->master_get(image, enabled, vme_base, size, aspace,
  465. cycle, dwidth);
  466. }
  467. EXPORT_SYMBOL(vme_master_get);
  468. /*
  469. * Read data out of VME space into a buffer.
  470. */
  471. ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
  472. loff_t offset)
  473. {
  474. struct vme_bridge *bridge = find_bridge(resource);
  475. struct vme_master_resource *image;
  476. size_t length;
  477. if (bridge->master_read == NULL) {
  478. printk(KERN_WARNING "Reading from resource not supported\n");
  479. return -EINVAL;
  480. }
  481. if (resource->type != VME_MASTER) {
  482. printk(KERN_ERR "Not a master resource\n");
  483. return -EINVAL;
  484. }
  485. image = list_entry(resource->entry, struct vme_master_resource, list);
  486. length = vme_get_size(resource);
  487. if (offset > length) {
  488. printk(KERN_WARNING "Invalid Offset\n");
  489. return -EFAULT;
  490. }
  491. if ((offset + count) > length)
  492. count = length - offset;
  493. return bridge->master_read(image, buf, count, offset);
  494. }
  495. EXPORT_SYMBOL(vme_master_read);
  496. /*
  497. * Write data out to VME space from a buffer.
  498. */
  499. ssize_t vme_master_write(struct vme_resource *resource, void *buf,
  500. size_t count, loff_t offset)
  501. {
  502. struct vme_bridge *bridge = find_bridge(resource);
  503. struct vme_master_resource *image;
  504. size_t length;
  505. if (bridge->master_write == NULL) {
  506. printk(KERN_WARNING "Writing to resource not supported\n");
  507. return -EINVAL;
  508. }
  509. if (resource->type != VME_MASTER) {
  510. printk(KERN_ERR "Not a master resource\n");
  511. return -EINVAL;
  512. }
  513. image = list_entry(resource->entry, struct vme_master_resource, list);
  514. length = vme_get_size(resource);
  515. if (offset > length) {
  516. printk(KERN_WARNING "Invalid Offset\n");
  517. return -EFAULT;
  518. }
  519. if ((offset + count) > length)
  520. count = length - offset;
  521. return bridge->master_write(image, buf, count, offset);
  522. }
  523. EXPORT_SYMBOL(vme_master_write);
/*
 * Perform RMW cycle to provided location.
 *
 * NOTE(review): the return type is unsigned int, yet -EINVAL is returned
 * on error, so callers cannot reliably distinguish an error from a large
 * read-back value.  Changing the signature would break existing callers,
 * so it is only flagged here.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
/*
 * Map the bus window backing a master resource into user space.
 *
 * The mmap offset (vm_pgoff) is interpreted relative to the start of the
 * window's bus resource, and the requested range must fit entirely inside
 * the window.  The mapping is created non-cached.
 */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	/* Physical start of the requested mapping within the window. */
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	/* bus_resource.end is inclusive, hence the "+ 1". */
	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);
  564. void vme_master_free(struct vme_resource *resource)
  565. {
  566. struct vme_master_resource *master_image;
  567. if (resource->type != VME_MASTER) {
  568. printk(KERN_ERR "Not a master resource\n");
  569. return;
  570. }
  571. master_image = list_entry(resource->entry, struct vme_master_resource,
  572. list);
  573. if (master_image == NULL) {
  574. printk(KERN_ERR "Can't find master resource\n");
  575. return;
  576. }
  577. /* Unlock image */
  578. spin_lock(&master_image->lock);
  579. if (master_image->locked == 0)
  580. printk(KERN_ERR "Image is already free\n");
  581. master_image->locked = 0;
  582. spin_unlock(&master_image->lock);
  583. /* Free up resource memory */
  584. kfree(resource);
  585. }
  586. EXPORT_SYMBOL(vme_master_free);
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's DMA resources under each controller's mutex and
 * claims the first unlocked controller whose route attribute mask covers
 * the requested @route.  The claim is released by vme_dma_free().
 * Returns NULL if no suitable controller is free or on allocation
 * failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {
			/* Claim the controller before dropping its mutex. */
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
  645. /*
  646. * Start new list
  647. */
  648. struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
  649. {
  650. struct vme_dma_resource *ctrlr;
  651. struct vme_dma_list *dma_list;
  652. if (resource->type != VME_DMA) {
  653. printk(KERN_ERR "Not a DMA resource\n");
  654. return NULL;
  655. }
  656. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  657. dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
  658. if (dma_list == NULL) {
  659. printk(KERN_ERR "Unable to allocate memory for new dma list\n");
  660. return NULL;
  661. }
  662. INIT_LIST_HEAD(&dma_list->entries);
  663. dma_list->parent = ctrlr;
  664. mutex_init(&dma_list->mtx);
  665. return dma_list;
  666. }
  667. EXPORT_SYMBOL(vme_new_dma_list);
  668. /*
  669. * Create "Pattern" type attributes
  670. */
  671. struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
  672. {
  673. struct vme_dma_attr *attributes;
  674. struct vme_dma_pattern *pattern_attr;
  675. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  676. if (attributes == NULL) {
  677. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  678. goto err_attr;
  679. }
  680. pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
  681. if (pattern_attr == NULL) {
  682. printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
  683. goto err_pat;
  684. }
  685. attributes->type = VME_DMA_PATTERN;
  686. attributes->private = (void *)pattern_attr;
  687. pattern_attr->pattern = pattern;
  688. pattern_attr->type = type;
  689. return attributes;
  690. err_pat:
  691. kfree(attributes);
  692. err_attr:
  693. return NULL;
  694. }
  695. EXPORT_SYMBOL(vme_dma_pattern_attribute);
  696. /*
  697. * Create "PCI" type attributes
  698. */
  699. struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
  700. {
  701. struct vme_dma_attr *attributes;
  702. struct vme_dma_pci *pci_attr;
  703. /* XXX Run some sanity checks here */
  704. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  705. if (attributes == NULL) {
  706. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  707. goto err_attr;
  708. }
  709. pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
  710. if (pci_attr == NULL) {
  711. printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
  712. goto err_pci;
  713. }
  714. attributes->type = VME_DMA_PCI;
  715. attributes->private = (void *)pci_attr;
  716. pci_attr->address = address;
  717. return attributes;
  718. err_pci:
  719. kfree(attributes);
  720. err_attr:
  721. return NULL;
  722. }
  723. EXPORT_SYMBOL(vme_dma_pci_attribute);
  724. /*
  725. * Create "VME" type attributes
  726. */
  727. struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
  728. u32 aspace, u32 cycle, u32 dwidth)
  729. {
  730. struct vme_dma_attr *attributes;
  731. struct vme_dma_vme *vme_attr;
  732. attributes = kmalloc(
  733. sizeof(struct vme_dma_attr), GFP_KERNEL);
  734. if (attributes == NULL) {
  735. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  736. goto err_attr;
  737. }
  738. vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
  739. if (vme_attr == NULL) {
  740. printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
  741. goto err_vme;
  742. }
  743. attributes->type = VME_DMA_VME;
  744. attributes->private = (void *)vme_attr;
  745. vme_attr->address = address;
  746. vme_attr->aspace = aspace;
  747. vme_attr->cycle = cycle;
  748. vme_attr->dwidth = dwidth;
  749. return attributes;
  750. err_vme:
  751. kfree(attributes);
  752. err_attr:
  753. return NULL;
  754. }
  755. EXPORT_SYMBOL(vme_dma_vme_attribute);
/*
 * Free attribute
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	/* Release the type-specific payload before its container. */
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
  765. int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
  766. struct vme_dma_attr *dest, size_t count)
  767. {
  768. struct vme_bridge *bridge = list->parent->parent;
  769. int retval;
  770. if (bridge->dma_list_add == NULL) {
  771. printk(KERN_WARNING "Link List DMA generation not supported\n");
  772. return -EINVAL;
  773. }
  774. if (!mutex_trylock(&list->mtx)) {
  775. printk(KERN_ERR "Link List already submitted\n");
  776. return -EINVAL;
  777. }
  778. retval = bridge->dma_list_add(list, src, dest, count);
  779. mutex_unlock(&list->mtx);
  780. return retval;
  781. }
  782. EXPORT_SYMBOL(vme_dma_list_add);
  783. int vme_dma_list_exec(struct vme_dma_list *list)
  784. {
  785. struct vme_bridge *bridge = list->parent->parent;
  786. int retval;
  787. if (bridge->dma_list_exec == NULL) {
  788. printk(KERN_ERR "Link List DMA execution not supported\n");
  789. return -EINVAL;
  790. }
  791. mutex_lock(&list->mtx);
  792. retval = bridge->dma_list_exec(list);
  793. mutex_unlock(&list->mtx);
  794. return retval;
  795. }
  796. EXPORT_SYMBOL(vme_dma_list_exec);
  797. int vme_dma_list_free(struct vme_dma_list *list)
  798. {
  799. struct vme_bridge *bridge = list->parent->parent;
  800. int retval;
  801. if (bridge->dma_list_empty == NULL) {
  802. printk(KERN_WARNING "Emptying of Link Lists not supported\n");
  803. return -EINVAL;
  804. }
  805. if (!mutex_trylock(&list->mtx)) {
  806. printk(KERN_ERR "Link List in use\n");
  807. return -EINVAL;
  808. }
  809. /*
  810. * Empty out all of the entries from the dma list. We need to go to the
  811. * low level driver as dma entries are driver specific.
  812. */
  813. retval = bridge->dma_list_empty(list);
  814. if (retval) {
  815. printk(KERN_ERR "Unable to empty link-list entries\n");
  816. mutex_unlock(&list->mtx);
  817. return retval;
  818. }
  819. mutex_unlock(&list->mtx);
  820. kfree(list);
  821. return retval;
  822. }
  823. EXPORT_SYMBOL(vme_dma_list_free);
  824. int vme_dma_free(struct vme_resource *resource)
  825. {
  826. struct vme_dma_resource *ctrlr;
  827. if (resource->type != VME_DMA) {
  828. printk(KERN_ERR "Not a DMA resource\n");
  829. return -EINVAL;
  830. }
  831. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  832. if (!mutex_trylock(&ctrlr->mtx)) {
  833. printk(KERN_ERR "Resource busy, can't free\n");
  834. return -EBUSY;
  835. }
  836. if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
  837. printk(KERN_WARNING "Resource still processing transfers\n");
  838. mutex_unlock(&ctrlr->mtx);
  839. return -EBUSY;
  840. }
  841. ctrlr->locked = 0;
  842. mutex_unlock(&ctrlr->mtx);
  843. kfree(resource);
  844. return 0;
  845. }
  846. EXPORT_SYMBOL(vme_dma_free);
  847. void vme_bus_error_handler(struct vme_bridge *bridge,
  848. unsigned long long address, int am)
  849. {
  850. struct list_head *handler_pos = NULL;
  851. struct vme_error_handler *handler;
  852. int handler_triggered = 0;
  853. u32 aspace = vme_get_aspace(am);
  854. list_for_each(handler_pos, &bridge->vme_error_handlers) {
  855. handler = list_entry(handler_pos, struct vme_error_handler,
  856. list);
  857. if ((aspace == handler->aspace) &&
  858. (address >= handler->start) &&
  859. (address < handler->end)) {
  860. if (!handler->num_errors)
  861. handler->first_error = address;
  862. if (handler->num_errors != UINT_MAX)
  863. handler->num_errors++;
  864. handler_triggered = 1;
  865. }
  866. }
  867. if (!handler_triggered)
  868. dev_err(bridge->parent,
  869. "Unhandled VME access error at address 0x%llx\n",
  870. address);
  871. }
  872. EXPORT_SYMBOL(vme_bus_error_handler);
  873. struct vme_error_handler *vme_register_error_handler(
  874. struct vme_bridge *bridge, u32 aspace,
  875. unsigned long long address, size_t len)
  876. {
  877. struct vme_error_handler *handler;
  878. handler = kmalloc(sizeof(*handler), GFP_KERNEL);
  879. if (!handler)
  880. return NULL;
  881. handler->aspace = aspace;
  882. handler->start = address;
  883. handler->end = address + len;
  884. handler->num_errors = 0;
  885. handler->first_error = 0;
  886. list_add_tail(&handler->list, &bridge->vme_error_handlers);
  887. return handler;
  888. }
  889. EXPORT_SYMBOL(vme_register_error_handler);
  890. void vme_unregister_error_handler(struct vme_error_handler *handler)
  891. {
  892. list_del(&handler->list);
  893. kfree(handler);
  894. }
  895. EXPORT_SYMBOL(vme_unregister_error_handler);
  896. void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
  897. {
  898. void (*call)(int, int, void *);
  899. void *priv_data;
  900. call = bridge->irq[level - 1].callback[statid].func;
  901. priv_data = bridge->irq[level - 1].callback[statid].priv_data;
  902. if (call != NULL)
  903. call(level, statid, priv_data);
  904. else
  905. printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
  906. level, statid);
  907. }
  908. EXPORT_SYMBOL(vme_irq_handler);
  909. int vme_irq_request(struct vme_dev *vdev, int level, int statid,
  910. void (*callback)(int, int, void *),
  911. void *priv_data)
  912. {
  913. struct vme_bridge *bridge;
  914. bridge = vdev->bridge;
  915. if (bridge == NULL) {
  916. printk(KERN_ERR "Can't find VME bus\n");
  917. return -EINVAL;
  918. }
  919. if ((level < 1) || (level > 7)) {
  920. printk(KERN_ERR "Invalid interrupt level\n");
  921. return -EINVAL;
  922. }
  923. if (bridge->irq_set == NULL) {
  924. printk(KERN_ERR "Configuring interrupts not supported\n");
  925. return -EINVAL;
  926. }
  927. mutex_lock(&bridge->irq_mtx);
  928. if (bridge->irq[level - 1].callback[statid].func) {
  929. mutex_unlock(&bridge->irq_mtx);
  930. printk(KERN_WARNING "VME Interrupt already taken\n");
  931. return -EBUSY;
  932. }
  933. bridge->irq[level - 1].count++;
  934. bridge->irq[level - 1].callback[statid].priv_data = priv_data;
  935. bridge->irq[level - 1].callback[statid].func = callback;
  936. /* Enable IRQ level */
  937. bridge->irq_set(bridge, level, 1, 1);
  938. mutex_unlock(&bridge->irq_mtx);
  939. return 0;
  940. }
  941. EXPORT_SYMBOL(vme_irq_request);
  942. void vme_irq_free(struct vme_dev *vdev, int level, int statid)
  943. {
  944. struct vme_bridge *bridge;
  945. bridge = vdev->bridge;
  946. if (bridge == NULL) {
  947. printk(KERN_ERR "Can't find VME bus\n");
  948. return;
  949. }
  950. if ((level < 1) || (level > 7)) {
  951. printk(KERN_ERR "Invalid interrupt level\n");
  952. return;
  953. }
  954. if (bridge->irq_set == NULL) {
  955. printk(KERN_ERR "Configuring interrupts not supported\n");
  956. return;
  957. }
  958. mutex_lock(&bridge->irq_mtx);
  959. bridge->irq[level - 1].count--;
  960. /* Disable IRQ level if no more interrupts attached at this level*/
  961. if (bridge->irq[level - 1].count == 0)
  962. bridge->irq_set(bridge, level, 0, 1);
  963. bridge->irq[level - 1].callback[statid].func = NULL;
  964. bridge->irq[level - 1].callback[statid].priv_data = NULL;
  965. mutex_unlock(&bridge->irq_mtx);
  966. }
  967. EXPORT_SYMBOL(vme_irq_free);
  968. int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
  969. {
  970. struct vme_bridge *bridge;
  971. bridge = vdev->bridge;
  972. if (bridge == NULL) {
  973. printk(KERN_ERR "Can't find VME bus\n");
  974. return -EINVAL;
  975. }
  976. if ((level < 1) || (level > 7)) {
  977. printk(KERN_WARNING "Invalid interrupt level\n");
  978. return -EINVAL;
  979. }
  980. if (bridge->irq_generate == NULL) {
  981. printk(KERN_WARNING "Interrupt generation not supported\n");
  982. return -EINVAL;
  983. }
  984. return bridge->irq_generate(bridge, level, statid);
  985. }
  986. EXPORT_SYMBOL(vme_irq_generate);
/*
 * Request the location monitor, return resource or NULL
 *
 * Walks the bridge's location monitor resources, locks the first
 * unlocked one and wraps it in a freshly allocated vme_resource.
 * Returns NULL if no bridge, no free monitor, or allocation fails.
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through location monitor resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);
		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			/* Claim it under the mutex so no one else can. */
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
  1041. int vme_lm_count(struct vme_resource *resource)
  1042. {
  1043. struct vme_lm_resource *lm;
  1044. if (resource->type != VME_LM) {
  1045. printk(KERN_ERR "Not a Location Monitor resource\n");
  1046. return -EINVAL;
  1047. }
  1048. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1049. return lm->monitors;
  1050. }
  1051. EXPORT_SYMBOL(vme_lm_count);
  1052. int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
  1053. u32 aspace, u32 cycle)
  1054. {
  1055. struct vme_bridge *bridge = find_bridge(resource);
  1056. struct vme_lm_resource *lm;
  1057. if (resource->type != VME_LM) {
  1058. printk(KERN_ERR "Not a Location Monitor resource\n");
  1059. return -EINVAL;
  1060. }
  1061. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1062. if (bridge->lm_set == NULL) {
  1063. printk(KERN_ERR "vme_lm_set not supported\n");
  1064. return -EINVAL;
  1065. }
  1066. return bridge->lm_set(lm, lm_base, aspace, cycle);
  1067. }
  1068. EXPORT_SYMBOL(vme_lm_set);
  1069. int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
  1070. u32 *aspace, u32 *cycle)
  1071. {
  1072. struct vme_bridge *bridge = find_bridge(resource);
  1073. struct vme_lm_resource *lm;
  1074. if (resource->type != VME_LM) {
  1075. printk(KERN_ERR "Not a Location Monitor resource\n");
  1076. return -EINVAL;
  1077. }
  1078. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1079. if (bridge->lm_get == NULL) {
  1080. printk(KERN_ERR "vme_lm_get not supported\n");
  1081. return -EINVAL;
  1082. }
  1083. return bridge->lm_get(lm, lm_base, aspace, cycle);
  1084. }
  1085. EXPORT_SYMBOL(vme_lm_get);
  1086. int vme_lm_attach(struct vme_resource *resource, int monitor,
  1087. void (*callback)(int))
  1088. {
  1089. struct vme_bridge *bridge = find_bridge(resource);
  1090. struct vme_lm_resource *lm;
  1091. if (resource->type != VME_LM) {
  1092. printk(KERN_ERR "Not a Location Monitor resource\n");
  1093. return -EINVAL;
  1094. }
  1095. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1096. if (bridge->lm_attach == NULL) {
  1097. printk(KERN_ERR "vme_lm_attach not supported\n");
  1098. return -EINVAL;
  1099. }
  1100. return bridge->lm_attach(lm, monitor, callback);
  1101. }
  1102. EXPORT_SYMBOL(vme_lm_attach);
  1103. int vme_lm_detach(struct vme_resource *resource, int monitor)
  1104. {
  1105. struct vme_bridge *bridge = find_bridge(resource);
  1106. struct vme_lm_resource *lm;
  1107. if (resource->type != VME_LM) {
  1108. printk(KERN_ERR "Not a Location Monitor resource\n");
  1109. return -EINVAL;
  1110. }
  1111. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1112. if (bridge->lm_detach == NULL) {
  1113. printk(KERN_ERR "vme_lm_detach not supported\n");
  1114. return -EINVAL;
  1115. }
  1116. return bridge->lm_detach(lm, monitor);
  1117. }
  1118. EXPORT_SYMBOL(vme_lm_detach);
  1119. void vme_lm_free(struct vme_resource *resource)
  1120. {
  1121. struct vme_lm_resource *lm;
  1122. if (resource->type != VME_LM) {
  1123. printk(KERN_ERR "Not a Location Monitor resource\n");
  1124. return;
  1125. }
  1126. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1127. mutex_lock(&lm->mtx);
  1128. /* XXX
  1129. * Check to see that there aren't any callbacks still attached, if
  1130. * there are we should probably be detaching them!
  1131. */
  1132. lm->locked = 0;
  1133. mutex_unlock(&lm->mtx);
  1134. kfree(resource);
  1135. }
  1136. EXPORT_SYMBOL(vme_lm_free);
  1137. int vme_slot_num(struct vme_dev *vdev)
  1138. {
  1139. struct vme_bridge *bridge;
  1140. bridge = vdev->bridge;
  1141. if (bridge == NULL) {
  1142. printk(KERN_ERR "Can't find VME bus\n");
  1143. return -EINVAL;
  1144. }
  1145. if (bridge->slot_get == NULL) {
  1146. printk(KERN_WARNING "vme_slot_num not supported\n");
  1147. return -EINVAL;
  1148. }
  1149. return bridge->slot_get(bridge);
  1150. }
  1151. EXPORT_SYMBOL(vme_slot_num);
  1152. int vme_bus_num(struct vme_dev *vdev)
  1153. {
  1154. struct vme_bridge *bridge;
  1155. bridge = vdev->bridge;
  1156. if (bridge == NULL) {
  1157. pr_err("Can't find VME bus\n");
  1158. return -EINVAL;
  1159. }
  1160. return bridge->num;
  1161. }
  1162. EXPORT_SYMBOL(vme_bus_num);
  1163. /* - Bridge Registration --------------------------------------------------- */
/*
 * Device-core release callback: frees the vme_dev that embeds the
 * struct device once its last reference is dropped.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
  1168. int vme_register_bridge(struct vme_bridge *bridge)
  1169. {
  1170. int i;
  1171. int ret = -1;
  1172. mutex_lock(&vme_buses_lock);
  1173. for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
  1174. if ((vme_bus_numbers & (1 << i)) == 0) {
  1175. vme_bus_numbers |= (1 << i);
  1176. bridge->num = i;
  1177. INIT_LIST_HEAD(&bridge->devices);
  1178. list_add_tail(&bridge->bus_list, &vme_bus_list);
  1179. ret = 0;
  1180. break;
  1181. }
  1182. }
  1183. mutex_unlock(&vme_buses_lock);
  1184. return ret;
  1185. }
  1186. EXPORT_SYMBOL(vme_register_bridge);
  1187. void vme_unregister_bridge(struct vme_bridge *bridge)
  1188. {
  1189. struct vme_dev *vdev;
  1190. struct vme_dev *tmp;
  1191. mutex_lock(&vme_buses_lock);
  1192. vme_bus_numbers &= ~(1 << bridge->num);
  1193. list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
  1194. list_del(&vdev->drv_list);
  1195. list_del(&vdev->bridge_list);
  1196. device_unregister(&vdev->dev);
  1197. }
  1198. list_del(&bridge->bus_list);
  1199. mutex_unlock(&vme_buses_lock);
  1200. }
  1201. EXPORT_SYMBOL(vme_unregister_bridge);
  1202. /* - Driver Registration --------------------------------------------------- */
  1203. static int __vme_register_driver_bus(struct vme_driver *drv,
  1204. struct vme_bridge *bridge, unsigned int ndevs)
  1205. {
  1206. int err;
  1207. unsigned int i;
  1208. struct vme_dev *vdev;
  1209. struct vme_dev *tmp;
  1210. for (i = 0; i < ndevs; i++) {
  1211. vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
  1212. if (!vdev) {
  1213. err = -ENOMEM;
  1214. goto err_devalloc;
  1215. }
  1216. vdev->num = i;
  1217. vdev->bridge = bridge;
  1218. vdev->dev.platform_data = drv;
  1219. vdev->dev.release = vme_dev_release;
  1220. vdev->dev.parent = bridge->parent;
  1221. vdev->dev.bus = &vme_bus_type;
  1222. dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
  1223. vdev->num);
  1224. err = device_register(&vdev->dev);
  1225. if (err)
  1226. goto err_reg;
  1227. if (vdev->dev.platform_data) {
  1228. list_add_tail(&vdev->drv_list, &drv->devices);
  1229. list_add_tail(&vdev->bridge_list, &bridge->devices);
  1230. } else
  1231. device_unregister(&vdev->dev);
  1232. }
  1233. return 0;
  1234. err_reg:
  1235. put_device(&vdev->dev);
  1236. kfree(vdev);
  1237. err_devalloc:
  1238. list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
  1239. list_del(&vdev->drv_list);
  1240. list_del(&vdev->bridge_list);
  1241. device_unregister(&vdev->dev);
  1242. }
  1243. return err;
  1244. }
/*
 * Register a driver's devices on every bridge currently on the bus.
 * Stops at, and returns, the first per-bridge failure.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}
  1265. int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1266. {
  1267. int err;
  1268. drv->driver.name = drv->name;
  1269. drv->driver.bus = &vme_bus_type;
  1270. INIT_LIST_HEAD(&drv->devices);
  1271. err = driver_register(&drv->driver);
  1272. if (err)
  1273. return err;
  1274. err = __vme_register_driver(drv, ndevs);
  1275. if (err)
  1276. driver_unregister(&drv->driver);
  1277. return err;
  1278. }
  1279. EXPORT_SYMBOL(vme_register_driver);
  1280. void vme_unregister_driver(struct vme_driver *drv)
  1281. {
  1282. struct vme_dev *dev, *dev_tmp;
  1283. mutex_lock(&vme_buses_lock);
  1284. list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
  1285. list_del(&dev->drv_list);
  1286. list_del(&dev->bridge_list);
  1287. device_unregister(&dev->dev);
  1288. }
  1289. mutex_unlock(&vme_buses_lock);
  1290. driver_unregister(&drv->driver);
  1291. }
  1292. EXPORT_SYMBOL(vme_unregister_driver);
  1293. /* - Bus Registration ------------------------------------------------------ */
  1294. static int vme_bus_match(struct device *dev, struct device_driver *drv)
  1295. {
  1296. struct vme_driver *vme_drv;
  1297. vme_drv = container_of(drv, struct vme_driver, driver);
  1298. if (dev->platform_data == vme_drv) {
  1299. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1300. if (vme_drv->match && vme_drv->match(vdev))
  1301. return 1;
  1302. dev->platform_data = NULL;
  1303. }
  1304. return 0;
  1305. }
  1306. static int vme_bus_probe(struct device *dev)
  1307. {
  1308. int retval = -ENODEV;
  1309. struct vme_driver *driver;
  1310. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1311. driver = dev->platform_data;
  1312. if (driver->probe != NULL)
  1313. retval = driver->probe(vdev);
  1314. return retval;
  1315. }
  1316. static int vme_bus_remove(struct device *dev)
  1317. {
  1318. int retval = -ENODEV;
  1319. struct vme_driver *driver;
  1320. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1321. driver = dev->platform_data;
  1322. if (driver->remove != NULL)
  1323. retval = driver->remove(vdev);
  1324. return retval;
  1325. }
/* The VME bus type: binds vme_dev devices to vme_driver drivers. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/* Register the VME bus type with the driver core at boot. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
/* Remove the VME bus type on module unload. */
static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}

/* Early (subsystem-level) init so bridge/driver modules can register. */
subsys_initcall(vme_init);
module_exit(vme_exit);