vme.c

/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vme.h>

#include "vme_bridge.h"

/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

static void __exit vme_exit(void);
static int __init vme_init(void);

static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}

/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
			list)->parent;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
			list)->parent;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
			list)->parent;
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
			list)->parent;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
	}
}

/*
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	if (bridge->alloc_consistent == NULL) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
		       bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
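
/*
 * A minimal usage sketch (not part of this file): back a slave window with
 * a consistent buffer. The 64 KiB size and the error handling are
 * illustrative assumptions.
 *
 *	dma_addr_t buf_base;
 *	void *buf;
 *
 *	buf = vme_alloc_consistent(resource, 0x10000, &buf_base);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vme_free_consistent(resource, 0x10000, buf, buf_base);
 */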

/*
 * Free previously allocated contiguous block of memory.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return;
	}

	if (bridge->free_consistent == NULL) {
		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
		       bridge->name);
		return;
	}

	bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);

size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, retval;
	unsigned long long base, size;
	dma_addr_t buf_base;
	u32 aspace, cycle, dwidth;

	switch (resource->type) {
	case VME_MASTER:
		retval = vme_master_get(resource, &enabled, &base, &size,
			&aspace, &cycle, &dwidth);
		if (retval)
			return 0;
		return size;
	case VME_SLAVE:
		retval = vme_slave_get(resource, &enabled, &base, &size,
			&buf_base, &aspace, &cycle);
		if (retval)
			return 0;
		return size;
	case VME_DMA:
		return 0;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
	}
}
EXPORT_SYMBOL(vme_get_size);

int vme_check_window(u32 aspace, unsigned long long vme_base,
	unsigned long long size)
{
	int retval = 0;

	switch (aspace) {
	case VME_A16:
		if (((vme_base + size) > VME_A16_MAX) ||
		    (vme_base > VME_A16_MAX))
			retval = -EFAULT;
		break;
	case VME_A24:
		if (((vme_base + size) > VME_A24_MAX) ||
		    (vme_base > VME_A24_MAX))
			retval = -EFAULT;
		break;
	case VME_A32:
		if (((vme_base + size) > VME_A32_MAX) ||
		    (vme_base > VME_A32_MAX))
			retval = -EFAULT;
		break;
	case VME_A64:
		/* The window must not wrap the 64-bit address space */
		if ((size != 0) && (vme_base > U64_MAX + 1 - size))
			retval = -EFAULT;
		break;
	case VME_CRCSR:
		if (((vme_base + size) > VME_CRCSR_MAX) ||
		    (vme_base > VME_CRCSR_MAX))
			retval = -EFAULT;
		break;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		break;
	}

	return retval;
}
EXPORT_SYMBOL(vme_check_window);
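
/*
 * For instance, a window that extends past the top of its address space is
 * rejected with -EFAULT, while an unknown aspace yields -EINVAL. A sketch
 * (base and size are illustrative):
 *
 *	if (vme_check_window(VME_A24, 0x800000, 0x10000))
 *		return -EFAULT;
 */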

/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
		    ((slave_image->cycle_attr & cycle) == cycle) &&
		    (slave_image->locked == 0)) {
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);

int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk(KERN_ERR "Function not supported\n");
		return -ENOSYS;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
	      ((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);

int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_get == NULL) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);

void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
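
/*
 * Putting the slave API together, a hypothetical client driver with a
 * struct vme_dev *vdev might expose a 64 KiB A24 window as sketched below.
 * VME_A24, VME_SCT, VME_USER and VME_DATA are flags from <linux/vme.h>;
 * the base address, size and labels are illustrative.
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_base;
 *	void *buf;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	if (!res)
 *		return -ENODEV;
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
 *	if (!buf)
 *		goto err_alloc;
 *	if (vme_slave_set(res, 1, 0x800000, 0x10000, buf_base, VME_A24,
 *			  VME_SCT | VME_USER | VME_DATA))
 *		goto err_set;
 */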

/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
		    ((master_image->cycle_attr & cycle) == cycle) &&
		    ((master_image->width_attr & dwidth) == dwidth) &&
		    (master_image->locked == 0)) {
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);

int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	if (!(((image->address_attr & aspace) == aspace) &&
	      ((image->cycle_attr & cycle) == cycle) &&
	      ((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);

int vme_master_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_get == NULL) {
		printk(KERN_WARNING "%s not supported\n", __func__);
		return -EINVAL;
	}

	return bridge->master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);

/*
 * Read data out of VME space into a buffer.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);

/*
 * Write data out to VME space from a buffer.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);

/*
 * Perform RMW cycle to provided location.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "RMW to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
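
/*
 * The RMW cycle is broadly a compare-and-swap on the bits selected by mask:
 * the location is read, compared against compare, and swap is written back
 * in one locked bus transaction (exact semantics are bridge-specific). A
 * sketch with illustrative values, atomically setting bit 0 if it is clear:
 *
 *	result = vme_master_rmw(resource, 0x1, 0x0, 0x1, 0);
 */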

int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma_size);
}
EXPORT_SYMBOL(vme_master_mmap);

void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
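
/*
 * A typical master-window round trip, as a sketch (window parameters are
 * assumptions; the flags come from <linux/vme.h>):
 *
 *	struct vme_resource *res;
 *	u32 data;
 *
 *	res = vme_master_request(vdev, VME_A32, VME_SCT | VME_USER | VME_DATA,
 *				 VME_D32);
 *	if (!res)
 *		return -ENODEV;
 *	if (vme_master_set(res, 1, 0x10000000, 0x10000, VME_A32,
 *			   VME_SCT | VME_USER | VME_DATA, VME_D32))
 *		goto err_set;
 *	if (vme_master_read(res, &data, 4, 0) < 0)
 *		goto err_io;
 *	...
 *	vme_master_free(res);
 */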

/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
		    (dma_ctrlr->locked == 0)) {
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock controller */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);

/*
 * Start new list
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
	if (dma_list == NULL) {
		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = ctrlr;
	mutex_init(&dma_list->mtx);

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);

/*
 * Create "Pattern" type attributes
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
	if (pattern_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
		goto err_pat;
	}

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);

/*
 * Create "PCI" type attributes
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pci *pci_attr;

	/* XXX Run some sanity checks here */
	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
	if (pci_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
		goto err_pci;
	}

	attributes->type = VME_DMA_PCI;
	attributes->private = (void *)pci_attr;

	pci_attr->address = address;

	return attributes;

err_pci:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);

/*
 * Create "VME" type attributes
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
	u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_vme *vme_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
	if (vme_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
		goto err_vme;
	}

	attributes->type = VME_DMA_VME;
	attributes->private = (void *)vme_attr;

	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;

	return attributes;

err_vme:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);

/*
 * Free attribute
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);

int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);

int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);

int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);

int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	kfree(resource);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
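
/*
 * A sketch of a complete list-mode transfer from system memory to the bus,
 * with error handling elided. VME_DMA_MEM_TO_VME is a routing flag from
 * <linux/vme.h>; buf_base, the bus address and the size are illustrative.
 *
 *	struct vme_resource *res;
 *	struct vme_dma_list *list;
 *	struct vme_dma_attr *src, *dest;
 *
 *	res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	list = vme_new_dma_list(res);
 *	src = vme_dma_pci_attribute(buf_base);
 *	dest = vme_dma_vme_attribute(0x10000000, VME_A32,
 *				     VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	if (!vme_dma_list_add(list, src, dest, 0x10000))
 *		vme_dma_list_exec(list);
 *	vme_dma_free_attribute(src);
 *	vme_dma_free_attribute(dest);
 *	vme_dma_list_free(list);
 *	vme_dma_free(res);
 */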

void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
	void (*call)(int, int, void *);
	void *priv_data;

	call = bridge->irq[level - 1].callback[statid].func;
	priv_data = bridge->irq[level - 1].callback[statid].priv_data;

	if (call != NULL)
		call(level, statid, priv_data);
	else
		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
		       level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);

int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);

void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);

int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_generate == NULL) {
		printk(KERN_WARNING "Interrupt generation not supported\n");
		return -EINVAL;
	}

	return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);
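
/*
 * Interrupt handling sketch: attach a callback for level 3 with status/ID
 * 0x20, and later raise and release the same interrupt. The level, statid
 * and handler body are illustrative assumptions.
 *
 *	static void example_isr(int level, int statid, void *priv)
 *	{
 *		pr_info("VME IRQ %d/0x%x\n", level, statid);
 *	}
 *
 *	if (vme_irq_request(vdev, 3, 0x20, example_isr, NULL))
 *		return -EBUSY;
 *	...
 *	vme_irq_generate(vdev, 3, 0x20);
 *	vme_irq_free(vdev, 3, 0x20);
 */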

/*
 * Request the location monitor, return resource or NULL
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through location monitor resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock monitor */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);

int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);

int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_set == NULL) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	return bridge->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);

int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_get == NULL) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);

int vme_lm_attach(struct vme_resource *resource, int monitor,
	void (*callback)(int))
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_attach == NULL) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);

int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_detach == NULL) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);

void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
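
/*
 * Location monitor sketch: watch a base address and hook the first monitor
 * slot. The address, flags and callback body are illustrative assumptions.
 *
 *	static void lm_cb(int monitor)
 *	{
 *		pr_info("Location monitor %d triggered\n", monitor);
 *	}
 *
 *	res = vme_lm_request(vdev);
 *	if (!res)
 *		return -ENODEV;
 *	if (vme_lm_set(res, 0x60000, VME_A24, VME_SCT | VME_USER | VME_DATA))
 *		goto err_set;
 *	vme_lm_attach(res, 0, lm_cb);
 *	...
 *	vme_lm_detach(res, 0);
 *	vme_lm_free(res);
 */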

int vme_slot_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if (bridge->slot_get == NULL) {
		printk(KERN_WARNING "vme_slot_num not supported\n");
		return -EINVAL;
	}

	return bridge->slot_get(bridge);
}
EXPORT_SYMBOL(vme_slot_num);

int vme_bus_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		pr_err("Can't find VME bus\n");
		return -EINVAL;
	}

	return bridge->num;
}
EXPORT_SYMBOL(vme_bus_num);

/* - Bridge Registration --------------------------------------------------- */

static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}

int vme_register_bridge(struct vme_bridge *bridge)
{
	int i;
	int ret = -1;

	mutex_lock(&vme_buses_lock);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if ((vme_bus_numbers & (1 << i)) == 0) {
			vme_bus_numbers |= (1 << i);
			bridge->num = i;
			INIT_LIST_HEAD(&bridge->devices);
			list_add_tail(&bridge->bus_list, &vme_bus_list);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&vme_buses_lock);

	return ret;
}
EXPORT_SYMBOL(vme_register_bridge);

void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);

/* - Driver Registration --------------------------------------------------- */

static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);
		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;

		/*
		 * vme_bus_match() clears platform_data when the driver's
		 * match() rejects the device, so a non-NULL value here means
		 * the device was matched and should be kept.
		 */
		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else {
			device_unregister(&vdev->dev);
		}
	}
	return 0;

err_reg:
	/*
	 * put_device() drops the last reference and frees vdev via
	 * vme_dev_release(); a further kfree() here would double free.
	 */
	put_device(&vdev->dev);
err_devalloc:
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}

static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);

	return err;
}

int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	int err;

	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;
	INIT_LIST_HEAD(&drv->devices);

	err = driver_register(&drv->driver);
	if (err)
		return err;

	err = __vme_register_driver(drv, ndevs);
	if (err)
		driver_unregister(&drv->driver);

	return err;
}
EXPORT_SYMBOL(vme_register_driver);
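
/*
 * A minimal client driver skeleton built on this interface. The names, the
 * always-true match and the device count of 1 are illustrative; .name,
 * .match, .probe and .remove are the struct vme_driver hooks used above.
 *
 *	static int example_match(struct vme_dev *vdev)
 *	{
 *		return 1;
 *	}
 *
 *	static struct vme_driver example_driver = {
 *		.name = "example",
 *		.match = example_match,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return vme_register_driver(&example_driver, 1);
 *	}
 */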

void vme_unregister_driver(struct vme_driver *drv)
{
	struct vme_dev *dev, *dev_tmp;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
		list_del(&dev->drv_list);
		list_del(&dev->bridge_list);
		device_unregister(&dev->dev);
	}
	mutex_unlock(&vme_buses_lock);

	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);

/* - Bus Registration ------------------------------------------------------ */

static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		dev->platform_data = NULL;
	}
	return 0;
}

static int vme_bus_probe(struct device *dev)
{
	int retval = -ENODEV;
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	driver = dev->platform_data;
	if (driver->probe != NULL)
		retval = driver->probe(vdev);

	return retval;
}

static int vme_bus_remove(struct device *dev)
{
	int retval = -ENODEV;
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	driver = dev->platform_data;
	if (driver->remove != NULL)
		retval = driver->remove(vdev);

	return retval;
}

struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);

static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}

static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}

subsys_initcall(vme_init);
module_exit(vme_exit);