vme.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061
  1. /*
  2. * VME Bridge Framework
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/init.h>
  16. #include <linux/export.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/highmem.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/device.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/mutex.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/slab.h>
  32. #include <linux/vme.h>
  33. #include "vme_bridge.h"
  34. /* Bitmask and list of registered buses both protected by common mutex */
  35. static unsigned int vme_bus_numbers;
  36. static LIST_HEAD(vme_bus_list);
  37. static DEFINE_MUTEX(vme_buses_lock);
  38. static int __init vme_init(void);
  39. static struct vme_dev *dev_to_vme_dev(struct device *dev)
  40. {
  41. return container_of(dev, struct vme_dev, dev);
  42. }
  43. /*
  44. * Find the bridge that the resource is associated with.
  45. */
  46. static struct vme_bridge *find_bridge(struct vme_resource *resource)
  47. {
  48. /* Get list to search */
  49. switch (resource->type) {
  50. case VME_MASTER:
  51. return list_entry(resource->entry, struct vme_master_resource,
  52. list)->parent;
  53. break;
  54. case VME_SLAVE:
  55. return list_entry(resource->entry, struct vme_slave_resource,
  56. list)->parent;
  57. break;
  58. case VME_DMA:
  59. return list_entry(resource->entry, struct vme_dma_resource,
  60. list)->parent;
  61. break;
  62. case VME_LM:
  63. return list_entry(resource->entry, struct vme_lm_resource,
  64. list)->parent;
  65. break;
  66. default:
  67. printk(KERN_ERR "Unknown resource type\n");
  68. return NULL;
  69. break;
  70. }
  71. }
  72. /**
  73. * vme_free_consistent - Allocate contiguous memory.
  74. * @resource: Pointer to VME resource.
  75. * @size: Size of allocation required.
  76. * @dma: Pointer to variable to store physical address of allocation.
  77. *
  78. * Allocate a contiguous block of memory for use by the driver. This is used to
  79. * create the buffers for the slave windows.
  80. *
  81. * Return: Virtual address of allocation on success, NULL on failure.
  82. */
  83. void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  84. dma_addr_t *dma)
  85. {
  86. struct vme_bridge *bridge;
  87. if (resource == NULL) {
  88. printk(KERN_ERR "No resource\n");
  89. return NULL;
  90. }
  91. bridge = find_bridge(resource);
  92. if (bridge == NULL) {
  93. printk(KERN_ERR "Can't find bridge\n");
  94. return NULL;
  95. }
  96. if (bridge->parent == NULL) {
  97. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  98. return NULL;
  99. }
  100. if (bridge->alloc_consistent == NULL) {
  101. printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
  102. bridge->name);
  103. return NULL;
  104. }
  105. return bridge->alloc_consistent(bridge->parent, size, dma);
  106. }
  107. EXPORT_SYMBOL(vme_alloc_consistent);
  108. /**
  109. * vme_free_consistent - Free previously allocated memory.
  110. * @resource: Pointer to VME resource.
  111. * @size: Size of allocation to free.
  112. * @vaddr: Virtual address of allocation.
  113. * @dma: Physical address of allocation.
  114. *
  115. * Free previously allocated block of contiguous memory.
  116. */
  117. void vme_free_consistent(struct vme_resource *resource, size_t size,
  118. void *vaddr, dma_addr_t dma)
  119. {
  120. struct vme_bridge *bridge;
  121. if (resource == NULL) {
  122. printk(KERN_ERR "No resource\n");
  123. return;
  124. }
  125. bridge = find_bridge(resource);
  126. if (bridge == NULL) {
  127. printk(KERN_ERR "Can't find bridge\n");
  128. return;
  129. }
  130. if (bridge->parent == NULL) {
  131. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  132. return;
  133. }
  134. if (bridge->free_consistent == NULL) {
  135. printk(KERN_ERR "free_consistent not supported by bridge %s\n",
  136. bridge->name);
  137. return;
  138. }
  139. bridge->free_consistent(bridge->parent, size, vaddr, dma);
  140. }
  141. EXPORT_SYMBOL(vme_free_consistent);
  142. /**
  143. * vme_get_size - Helper function returning size of a VME window
  144. * @resource: Pointer to VME slave or master resource.
  145. *
  146. * Determine the size of the VME window provided. This is a helper
  147. * function, wrappering the call to vme_master_get or vme_slave_get
  148. * depending on the type of window resource handed to it.
  149. *
  150. * Return: Size of the window on success, zero on failure.
  151. */
  152. size_t vme_get_size(struct vme_resource *resource)
  153. {
  154. int enabled, retval;
  155. unsigned long long base, size;
  156. dma_addr_t buf_base;
  157. u32 aspace, cycle, dwidth;
  158. switch (resource->type) {
  159. case VME_MASTER:
  160. retval = vme_master_get(resource, &enabled, &base, &size,
  161. &aspace, &cycle, &dwidth);
  162. if (retval)
  163. return 0;
  164. return size;
  165. break;
  166. case VME_SLAVE:
  167. retval = vme_slave_get(resource, &enabled, &base, &size,
  168. &buf_base, &aspace, &cycle);
  169. if (retval)
  170. return 0;
  171. return size;
  172. break;
  173. case VME_DMA:
  174. return 0;
  175. break;
  176. default:
  177. printk(KERN_ERR "Unknown resource type\n");
  178. return 0;
  179. break;
  180. }
  181. }
  182. EXPORT_SYMBOL(vme_get_size);
  183. int vme_check_window(u32 aspace, unsigned long long vme_base,
  184. unsigned long long size)
  185. {
  186. int retval = 0;
  187. switch (aspace) {
  188. case VME_A16:
  189. if (((vme_base + size) > VME_A16_MAX) ||
  190. (vme_base > VME_A16_MAX))
  191. retval = -EFAULT;
  192. break;
  193. case VME_A24:
  194. if (((vme_base + size) > VME_A24_MAX) ||
  195. (vme_base > VME_A24_MAX))
  196. retval = -EFAULT;
  197. break;
  198. case VME_A32:
  199. if (((vme_base + size) > VME_A32_MAX) ||
  200. (vme_base > VME_A32_MAX))
  201. retval = -EFAULT;
  202. break;
  203. case VME_A64:
  204. if ((size != 0) && (vme_base > U64_MAX + 1 - size))
  205. retval = -EFAULT;
  206. break;
  207. case VME_CRCSR:
  208. if (((vme_base + size) > VME_CRCSR_MAX) ||
  209. (vme_base > VME_CRCSR_MAX))
  210. retval = -EFAULT;
  211. break;
  212. case VME_USER1:
  213. case VME_USER2:
  214. case VME_USER3:
  215. case VME_USER4:
  216. /* User Defined */
  217. break;
  218. default:
  219. printk(KERN_ERR "Invalid address space\n");
  220. retval = -EINVAL;
  221. break;
  222. }
  223. return retval;
  224. }
  225. EXPORT_SYMBOL(vme_check_window);
  226. static u32 vme_get_aspace(int am)
  227. {
  228. switch (am) {
  229. case 0x29:
  230. case 0x2D:
  231. return VME_A16;
  232. case 0x38:
  233. case 0x39:
  234. case 0x3A:
  235. case 0x3B:
  236. case 0x3C:
  237. case 0x3D:
  238. case 0x3E:
  239. case 0x3F:
  240. return VME_A24;
  241. case 0x8:
  242. case 0x9:
  243. case 0xA:
  244. case 0xB:
  245. case 0xC:
  246. case 0xD:
  247. case 0xE:
  248. case 0xF:
  249. return VME_A32;
  250. case 0x0:
  251. case 0x1:
  252. case 0x3:
  253. return VME_A64;
  254. }
  255. return 0;
  256. }
  257. /**
  258. * vme_slave_request - Request a VME slave window resource.
  259. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  260. * @address: Required VME address space.
  261. * @cycle: Required VME data transfer cycle type.
  262. *
  263. * Request use of a VME window resource capable of being set for the requested
  264. * address space and data transfer cycle.
  265. *
  266. * Return: Pointer to VME resource on success, NULL on failure.
  267. */
  268. struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
  269. u32 cycle)
  270. {
  271. struct vme_bridge *bridge;
  272. struct list_head *slave_pos = NULL;
  273. struct vme_slave_resource *allocated_image = NULL;
  274. struct vme_slave_resource *slave_image = NULL;
  275. struct vme_resource *resource = NULL;
  276. bridge = vdev->bridge;
  277. if (bridge == NULL) {
  278. printk(KERN_ERR "Can't find VME bus\n");
  279. goto err_bus;
  280. }
  281. /* Loop through slave resources */
  282. list_for_each(slave_pos, &bridge->slave_resources) {
  283. slave_image = list_entry(slave_pos,
  284. struct vme_slave_resource, list);
  285. if (slave_image == NULL) {
  286. printk(KERN_ERR "Registered NULL Slave resource\n");
  287. continue;
  288. }
  289. /* Find an unlocked and compatible image */
  290. mutex_lock(&slave_image->mtx);
  291. if (((slave_image->address_attr & address) == address) &&
  292. ((slave_image->cycle_attr & cycle) == cycle) &&
  293. (slave_image->locked == 0)) {
  294. slave_image->locked = 1;
  295. mutex_unlock(&slave_image->mtx);
  296. allocated_image = slave_image;
  297. break;
  298. }
  299. mutex_unlock(&slave_image->mtx);
  300. }
  301. /* No free image */
  302. if (allocated_image == NULL)
  303. goto err_image;
  304. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  305. if (resource == NULL) {
  306. printk(KERN_WARNING "Unable to allocate resource structure\n");
  307. goto err_alloc;
  308. }
  309. resource->type = VME_SLAVE;
  310. resource->entry = &allocated_image->list;
  311. return resource;
  312. err_alloc:
  313. /* Unlock image */
  314. mutex_lock(&slave_image->mtx);
  315. slave_image->locked = 0;
  316. mutex_unlock(&slave_image->mtx);
  317. err_image:
  318. err_bus:
  319. return NULL;
  320. }
  321. EXPORT_SYMBOL(vme_slave_request);
  322. /**
  323. * vme_slave_set - Set VME slave window configuration.
  324. * @resource: Pointer to VME slave resource.
  325. * @enabled: State to which the window should be configured.
  326. * @vme_base: Base address for the window.
  327. * @size: Size of the VME window.
  328. * @buf_base: Based address of buffer used to provide VME slave window storage.
  329. * @aspace: VME address space for the VME window.
  330. * @cycle: VME data transfer cycle type for the VME window.
  331. *
  332. * Set configuration for provided VME slave window.
  333. *
  334. * Return: Zero on success, -EINVAL if operation is not supported on this
  335. * device, if an invalid resource has been provided or invalid
  336. * attributes are provided. Hardware specific errors may also be
  337. * returned.
  338. */
  339. int vme_slave_set(struct vme_resource *resource, int enabled,
  340. unsigned long long vme_base, unsigned long long size,
  341. dma_addr_t buf_base, u32 aspace, u32 cycle)
  342. {
  343. struct vme_bridge *bridge = find_bridge(resource);
  344. struct vme_slave_resource *image;
  345. int retval;
  346. if (resource->type != VME_SLAVE) {
  347. printk(KERN_ERR "Not a slave resource\n");
  348. return -EINVAL;
  349. }
  350. image = list_entry(resource->entry, struct vme_slave_resource, list);
  351. if (bridge->slave_set == NULL) {
  352. printk(KERN_ERR "Function not supported\n");
  353. return -ENOSYS;
  354. }
  355. if (!(((image->address_attr & aspace) == aspace) &&
  356. ((image->cycle_attr & cycle) == cycle))) {
  357. printk(KERN_ERR "Invalid attributes\n");
  358. return -EINVAL;
  359. }
  360. retval = vme_check_window(aspace, vme_base, size);
  361. if (retval)
  362. return retval;
  363. return bridge->slave_set(image, enabled, vme_base, size, buf_base,
  364. aspace, cycle);
  365. }
  366. EXPORT_SYMBOL(vme_slave_set);
  367. /**
  368. * vme_slave_get - Retrieve VME slave window configuration.
  369. * @resource: Pointer to VME slave resource.
  370. * @enabled: Pointer to variable for storing state.
  371. * @vme_base: Pointer to variable for storing window base address.
  372. * @size: Pointer to variable for storing window size.
  373. * @buf_base: Pointer to variable for storing slave buffer base address.
  374. * @aspace: Pointer to variable for storing VME address space.
  375. * @cycle: Pointer to variable for storing VME data transfer cycle type.
  376. *
  377. * Return configuration for provided VME slave window.
  378. *
  379. * Return: Zero on success, -EINVAL if operation is not supported on this
  380. * device or if an invalid resource has been provided.
  381. */
  382. int vme_slave_get(struct vme_resource *resource, int *enabled,
  383. unsigned long long *vme_base, unsigned long long *size,
  384. dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
  385. {
  386. struct vme_bridge *bridge = find_bridge(resource);
  387. struct vme_slave_resource *image;
  388. if (resource->type != VME_SLAVE) {
  389. printk(KERN_ERR "Not a slave resource\n");
  390. return -EINVAL;
  391. }
  392. image = list_entry(resource->entry, struct vme_slave_resource, list);
  393. if (bridge->slave_get == NULL) {
  394. printk(KERN_ERR "vme_slave_get not supported\n");
  395. return -EINVAL;
  396. }
  397. return bridge->slave_get(image, enabled, vme_base, size, buf_base,
  398. aspace, cycle);
  399. }
  400. EXPORT_SYMBOL(vme_slave_get);
  401. /**
  402. * vme_slave_free - Free VME slave window
  403. * @resource: Pointer to VME slave resource.
  404. *
  405. * Free the provided slave resource so that it may be reallocated.
  406. */
  407. void vme_slave_free(struct vme_resource *resource)
  408. {
  409. struct vme_slave_resource *slave_image;
  410. if (resource->type != VME_SLAVE) {
  411. printk(KERN_ERR "Not a slave resource\n");
  412. return;
  413. }
  414. slave_image = list_entry(resource->entry, struct vme_slave_resource,
  415. list);
  416. if (slave_image == NULL) {
  417. printk(KERN_ERR "Can't find slave resource\n");
  418. return;
  419. }
  420. /* Unlock image */
  421. mutex_lock(&slave_image->mtx);
  422. if (slave_image->locked == 0)
  423. printk(KERN_ERR "Image is already free\n");
  424. slave_image->locked = 0;
  425. mutex_unlock(&slave_image->mtx);
  426. /* Free up resource memory */
  427. kfree(resource);
  428. }
  429. EXPORT_SYMBOL(vme_slave_free);
  430. /**
  431. * vme_master_request - Request a VME master window resource.
  432. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  433. * @address: Required VME address space.
  434. * @cycle: Required VME data transfer cycle type.
  435. * @dwidth: Required VME data transfer width.
  436. *
  437. * Request use of a VME window resource capable of being set for the requested
  438. * address space, data transfer cycle and width.
  439. *
  440. * Return: Pointer to VME resource on success, NULL on failure.
  441. */
  442. struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
  443. u32 cycle, u32 dwidth)
  444. {
  445. struct vme_bridge *bridge;
  446. struct list_head *master_pos = NULL;
  447. struct vme_master_resource *allocated_image = NULL;
  448. struct vme_master_resource *master_image = NULL;
  449. struct vme_resource *resource = NULL;
  450. bridge = vdev->bridge;
  451. if (bridge == NULL) {
  452. printk(KERN_ERR "Can't find VME bus\n");
  453. goto err_bus;
  454. }
  455. /* Loop through master resources */
  456. list_for_each(master_pos, &bridge->master_resources) {
  457. master_image = list_entry(master_pos,
  458. struct vme_master_resource, list);
  459. if (master_image == NULL) {
  460. printk(KERN_WARNING "Registered NULL master resource\n");
  461. continue;
  462. }
  463. /* Find an unlocked and compatible image */
  464. spin_lock(&master_image->lock);
  465. if (((master_image->address_attr & address) == address) &&
  466. ((master_image->cycle_attr & cycle) == cycle) &&
  467. ((master_image->width_attr & dwidth) == dwidth) &&
  468. (master_image->locked == 0)) {
  469. master_image->locked = 1;
  470. spin_unlock(&master_image->lock);
  471. allocated_image = master_image;
  472. break;
  473. }
  474. spin_unlock(&master_image->lock);
  475. }
  476. /* Check to see if we found a resource */
  477. if (allocated_image == NULL) {
  478. printk(KERN_ERR "Can't find a suitable resource\n");
  479. goto err_image;
  480. }
  481. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  482. if (resource == NULL) {
  483. printk(KERN_ERR "Unable to allocate resource structure\n");
  484. goto err_alloc;
  485. }
  486. resource->type = VME_MASTER;
  487. resource->entry = &allocated_image->list;
  488. return resource;
  489. err_alloc:
  490. /* Unlock image */
  491. spin_lock(&master_image->lock);
  492. master_image->locked = 0;
  493. spin_unlock(&master_image->lock);
  494. err_image:
  495. err_bus:
  496. return NULL;
  497. }
  498. EXPORT_SYMBOL(vme_master_request);
  499. /**
  500. * vme_master_set - Set VME master window configuration.
  501. * @resource: Pointer to VME master resource.
  502. * @enabled: State to which the window should be configured.
  503. * @vme_base: Base address for the window.
  504. * @size: Size of the VME window.
  505. * @aspace: VME address space for the VME window.
  506. * @cycle: VME data transfer cycle type for the VME window.
  507. * @dwidth: VME data transfer width for the VME window.
  508. *
  509. * Set configuration for provided VME master window.
  510. *
  511. * Return: Zero on success, -EINVAL if operation is not supported on this
  512. * device, if an invalid resource has been provided or invalid
  513. * attributes are provided. Hardware specific errors may also be
  514. * returned.
  515. */
  516. int vme_master_set(struct vme_resource *resource, int enabled,
  517. unsigned long long vme_base, unsigned long long size, u32 aspace,
  518. u32 cycle, u32 dwidth)
  519. {
  520. struct vme_bridge *bridge = find_bridge(resource);
  521. struct vme_master_resource *image;
  522. int retval;
  523. if (resource->type != VME_MASTER) {
  524. printk(KERN_ERR "Not a master resource\n");
  525. return -EINVAL;
  526. }
  527. image = list_entry(resource->entry, struct vme_master_resource, list);
  528. if (bridge->master_set == NULL) {
  529. printk(KERN_WARNING "vme_master_set not supported\n");
  530. return -EINVAL;
  531. }
  532. if (!(((image->address_attr & aspace) == aspace) &&
  533. ((image->cycle_attr & cycle) == cycle) &&
  534. ((image->width_attr & dwidth) == dwidth))) {
  535. printk(KERN_WARNING "Invalid attributes\n");
  536. return -EINVAL;
  537. }
  538. retval = vme_check_window(aspace, vme_base, size);
  539. if (retval)
  540. return retval;
  541. return bridge->master_set(image, enabled, vme_base, size, aspace,
  542. cycle, dwidth);
  543. }
  544. EXPORT_SYMBOL(vme_master_set);
  545. /**
  546. * vme_master_get - Retrieve VME master window configuration.
  547. * @resource: Pointer to VME master resource.
  548. * @enabled: Pointer to variable for storing state.
  549. * @vme_base: Pointer to variable for storing window base address.
  550. * @size: Pointer to variable for storing window size.
  551. * @aspace: Pointer to variable for storing VME address space.
  552. * @cycle: Pointer to variable for storing VME data transfer cycle type.
  553. * @dwidth: Pointer to variable for storing VME data transfer width.
  554. *
  555. * Return configuration for provided VME master window.
  556. *
  557. * Return: Zero on success, -EINVAL if operation is not supported on this
  558. * device or if an invalid resource has been provided.
  559. */
  560. int vme_master_get(struct vme_resource *resource, int *enabled,
  561. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  562. u32 *cycle, u32 *dwidth)
  563. {
  564. struct vme_bridge *bridge = find_bridge(resource);
  565. struct vme_master_resource *image;
  566. if (resource->type != VME_MASTER) {
  567. printk(KERN_ERR "Not a master resource\n");
  568. return -EINVAL;
  569. }
  570. image = list_entry(resource->entry, struct vme_master_resource, list);
  571. if (bridge->master_get == NULL) {
  572. printk(KERN_WARNING "%s not supported\n", __func__);
  573. return -EINVAL;
  574. }
  575. return bridge->master_get(image, enabled, vme_base, size, aspace,
  576. cycle, dwidth);
  577. }
  578. EXPORT_SYMBOL(vme_master_get);
  579. /**
  580. * vme_master_write - Read data from VME space into a buffer.
  581. * @resource: Pointer to VME master resource.
  582. * @buf: Pointer to buffer where data should be transferred.
  583. * @count: Number of bytes to transfer.
  584. * @offset: Offset into VME master window at which to start transfer.
  585. *
  586. * Perform read of count bytes of data from location on VME bus which maps into
  587. * the VME master window at offset to buf.
  588. *
  589. * Return: Number of bytes read, -EINVAL if resource is not a VME master
  590. * resource or read operation is not supported. -EFAULT returned if
  591. * invalid offset is provided. Hardware specific errors may also be
  592. * returned.
  593. */
  594. ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
  595. loff_t offset)
  596. {
  597. struct vme_bridge *bridge = find_bridge(resource);
  598. struct vme_master_resource *image;
  599. size_t length;
  600. if (bridge->master_read == NULL) {
  601. printk(KERN_WARNING "Reading from resource not supported\n");
  602. return -EINVAL;
  603. }
  604. if (resource->type != VME_MASTER) {
  605. printk(KERN_ERR "Not a master resource\n");
  606. return -EINVAL;
  607. }
  608. image = list_entry(resource->entry, struct vme_master_resource, list);
  609. length = vme_get_size(resource);
  610. if (offset > length) {
  611. printk(KERN_WARNING "Invalid Offset\n");
  612. return -EFAULT;
  613. }
  614. if ((offset + count) > length)
  615. count = length - offset;
  616. return bridge->master_read(image, buf, count, offset);
  617. }
  618. EXPORT_SYMBOL(vme_master_read);
  619. /**
  620. * vme_master_write - Write data out to VME space from a buffer.
  621. * @resource: Pointer to VME master resource.
  622. * @buf: Pointer to buffer holding data to transfer.
  623. * @count: Number of bytes to transfer.
  624. * @offset: Offset into VME master window at which to start transfer.
  625. *
  626. * Perform write of count bytes of data from buf to location on VME bus which
  627. * maps into the VME master window at offset.
  628. *
  629. * Return: Number of bytes written, -EINVAL if resource is not a VME master
  630. * resource or write operation is not supported. -EFAULT returned if
  631. * invalid offset is provided. Hardware specific errors may also be
  632. * returned.
  633. */
  634. ssize_t vme_master_write(struct vme_resource *resource, void *buf,
  635. size_t count, loff_t offset)
  636. {
  637. struct vme_bridge *bridge = find_bridge(resource);
  638. struct vme_master_resource *image;
  639. size_t length;
  640. if (bridge->master_write == NULL) {
  641. printk(KERN_WARNING "Writing to resource not supported\n");
  642. return -EINVAL;
  643. }
  644. if (resource->type != VME_MASTER) {
  645. printk(KERN_ERR "Not a master resource\n");
  646. return -EINVAL;
  647. }
  648. image = list_entry(resource->entry, struct vme_master_resource, list);
  649. length = vme_get_size(resource);
  650. if (offset > length) {
  651. printk(KERN_WARNING "Invalid Offset\n");
  652. return -EFAULT;
  653. }
  654. if ((offset + count) > length)
  655. count = length - offset;
  656. return bridge->master_write(image, buf, count, offset);
  657. }
  658. EXPORT_SYMBOL(vme_master_write);
  659. /**
  660. * vme_master_rmw - Perform read-modify-write cycle.
  661. * @resource: Pointer to VME master resource.
  662. * @mask: Bits to be compared and swapped in operation.
  663. * @compare: Bits to be compared with data read from offset.
  664. * @swap: Bits to be swapped in data read from offset.
  665. * @offset: Offset into VME master window at which to perform operation.
  666. *
  667. * Perform read-modify-write cycle on provided location:
  668. * - Location on VME bus is read.
  669. * - Bits selected by mask are compared with compare.
  670. * - Where a selected bit matches that in compare and are selected in swap,
  671. * the bit is swapped.
  672. * - Result written back to location on VME bus.
  673. *
  674. * Return: Bytes written on success, -EINVAL if resource is not a VME master
  675. * resource or RMW operation is not supported. Hardware specific
  676. * errors may also be returned.
  677. */
  678. unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
  679. unsigned int compare, unsigned int swap, loff_t offset)
  680. {
  681. struct vme_bridge *bridge = find_bridge(resource);
  682. struct vme_master_resource *image;
  683. if (bridge->master_rmw == NULL) {
  684. printk(KERN_WARNING "Writing to resource not supported\n");
  685. return -EINVAL;
  686. }
  687. if (resource->type != VME_MASTER) {
  688. printk(KERN_ERR "Not a master resource\n");
  689. return -EINVAL;
  690. }
  691. image = list_entry(resource->entry, struct vme_master_resource, list);
  692. return bridge->master_rmw(image, mask, compare, swap, offset);
  693. }
  694. EXPORT_SYMBOL(vme_master_rmw);
  695. /**
  696. * vme_master_mmap - Mmap region of VME master window.
  697. * @resource: Pointer to VME master resource.
  698. * @vma: Pointer to definition of user mapping.
  699. *
  700. * Memory map a region of the VME master window into user space.
  701. *
  702. * Return: Zero on success, -EINVAL if resource is not a VME master
  703. * resource or -EFAULT if map exceeds window size. Other generic mmap
  704. * errors may also be returned.
  705. */
  706. int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
  707. {
  708. struct vme_master_resource *image;
  709. phys_addr_t phys_addr;
  710. unsigned long vma_size;
  711. if (resource->type != VME_MASTER) {
  712. pr_err("Not a master resource\n");
  713. return -EINVAL;
  714. }
  715. image = list_entry(resource->entry, struct vme_master_resource, list);
  716. phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
  717. vma_size = vma->vm_end - vma->vm_start;
  718. if (phys_addr + vma_size > image->bus_resource.end + 1) {
  719. pr_err("Map size cannot exceed the window size\n");
  720. return -EFAULT;
  721. }
  722. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  723. return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
  724. }
  725. EXPORT_SYMBOL(vme_master_mmap);
  726. /**
  727. * vme_master_free - Free VME master window
  728. * @resource: Pointer to VME master resource.
  729. *
  730. * Free the provided master resource so that it may be reallocated.
  731. */
  732. void vme_master_free(struct vme_resource *resource)
  733. {
  734. struct vme_master_resource *master_image;
  735. if (resource->type != VME_MASTER) {
  736. printk(KERN_ERR "Not a master resource\n");
  737. return;
  738. }
  739. master_image = list_entry(resource->entry, struct vme_master_resource,
  740. list);
  741. if (master_image == NULL) {
  742. printk(KERN_ERR "Can't find master resource\n");
  743. return;
  744. }
  745. /* Unlock image */
  746. spin_lock(&master_image->lock);
  747. if (master_image->locked == 0)
  748. printk(KERN_ERR "Image is already free\n");
  749. master_image->locked = 0;
  750. spin_unlock(&master_image->lock);
  751. /* Free up resource memory */
  752. kfree(resource);
  753. }
  754. EXPORT_SYMBOL(vme_master_free);
  755. /**
  756. * vme_dma_request - Request a DMA controller.
  757. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  758. * @route: Required src/destination combination.
  759. *
  760. * Request a VME DMA controller with capability to perform transfers bewteen
  761. * requested source/destination combination.
  762. *
  763. * Return: Pointer to VME DMA resource on success, NULL on failure.
  764. */
  765. struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
  766. {
  767. struct vme_bridge *bridge;
  768. struct list_head *dma_pos = NULL;
  769. struct vme_dma_resource *allocated_ctrlr = NULL;
  770. struct vme_dma_resource *dma_ctrlr = NULL;
  771. struct vme_resource *resource = NULL;
  772. /* XXX Not checking resource attributes */
  773. printk(KERN_ERR "No VME resource Attribute tests done\n");
  774. bridge = vdev->bridge;
  775. if (bridge == NULL) {
  776. printk(KERN_ERR "Can't find VME bus\n");
  777. goto err_bus;
  778. }
  779. /* Loop through DMA resources */
  780. list_for_each(dma_pos, &bridge->dma_resources) {
  781. dma_ctrlr = list_entry(dma_pos,
  782. struct vme_dma_resource, list);
  783. if (dma_ctrlr == NULL) {
  784. printk(KERN_ERR "Registered NULL DMA resource\n");
  785. continue;
  786. }
  787. /* Find an unlocked and compatible controller */
  788. mutex_lock(&dma_ctrlr->mtx);
  789. if (((dma_ctrlr->route_attr & route) == route) &&
  790. (dma_ctrlr->locked == 0)) {
  791. dma_ctrlr->locked = 1;
  792. mutex_unlock(&dma_ctrlr->mtx);
  793. allocated_ctrlr = dma_ctrlr;
  794. break;
  795. }
  796. mutex_unlock(&dma_ctrlr->mtx);
  797. }
  798. /* Check to see if we found a resource */
  799. if (allocated_ctrlr == NULL)
  800. goto err_ctrlr;
  801. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  802. if (resource == NULL) {
  803. printk(KERN_WARNING "Unable to allocate resource structure\n");
  804. goto err_alloc;
  805. }
  806. resource->type = VME_DMA;
  807. resource->entry = &allocated_ctrlr->list;
  808. return resource;
  809. err_alloc:
  810. /* Unlock image */
  811. mutex_lock(&dma_ctrlr->mtx);
  812. dma_ctrlr->locked = 0;
  813. mutex_unlock(&dma_ctrlr->mtx);
  814. err_ctrlr:
  815. err_bus:
  816. return NULL;
  817. }
  818. EXPORT_SYMBOL(vme_dma_request);
  819. /**
  820. * vme_new_dma_list - Create new VME DMA list.
  821. * @resource: Pointer to VME DMA resource.
  822. *
  823. * Create a new VME DMA list. It is the responsibility of the user to free
  824. * the list once it is no longer required with vme_dma_list_free().
  825. *
  826. * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
  827. * VME DMA resource.
  828. */
  829. struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
  830. {
  831. struct vme_dma_resource *ctrlr;
  832. struct vme_dma_list *dma_list;
  833. if (resource->type != VME_DMA) {
  834. printk(KERN_ERR "Not a DMA resource\n");
  835. return NULL;
  836. }
  837. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  838. dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
  839. if (dma_list == NULL) {
  840. printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
  841. return NULL;
  842. }
  843. INIT_LIST_HEAD(&dma_list->entries);
  844. dma_list->parent = ctrlr;
  845. mutex_init(&dma_list->mtx);
  846. return dma_list;
  847. }
  848. EXPORT_SYMBOL(vme_new_dma_list);
  849. /**
  850. * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
  851. * @pattern: Value to use used as pattern
  852. * @type: Type of pattern to be written.
  853. *
  854. * Create VME DMA list attribute for pattern generation. It is the
  855. * responsibility of the user to free used attributes using
  856. * vme_dma_free_attribute().
  857. *
  858. * Return: Pointer to VME DMA attribute, NULL on failure.
  859. */
  860. struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
  861. {
  862. struct vme_dma_attr *attributes;
  863. struct vme_dma_pattern *pattern_attr;
  864. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  865. if (attributes == NULL) {
  866. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  867. goto err_attr;
  868. }
  869. pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
  870. if (pattern_attr == NULL) {
  871. printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
  872. goto err_pat;
  873. }
  874. attributes->type = VME_DMA_PATTERN;
  875. attributes->private = (void *)pattern_attr;
  876. pattern_attr->pattern = pattern;
  877. pattern_attr->type = type;
  878. return attributes;
  879. err_pat:
  880. kfree(attributes);
  881. err_attr:
  882. return NULL;
  883. }
  884. EXPORT_SYMBOL(vme_dma_pattern_attribute);
  885. /**
  886. * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
  887. * @address: PCI base address for DMA transfer.
  888. *
  889. * Create VME DMA list attribute pointing to a location on PCI for DMA
  890. * transfers. It is the responsibility of the user to free used attributes
  891. * using vme_dma_free_attribute().
  892. *
  893. * Return: Pointer to VME DMA attribute, NULL on failure.
  894. */
  895. struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
  896. {
  897. struct vme_dma_attr *attributes;
  898. struct vme_dma_pci *pci_attr;
  899. /* XXX Run some sanity checks here */
  900. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  901. if (attributes == NULL) {
  902. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  903. goto err_attr;
  904. }
  905. pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
  906. if (pci_attr == NULL) {
  907. printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
  908. goto err_pci;
  909. }
  910. attributes->type = VME_DMA_PCI;
  911. attributes->private = (void *)pci_attr;
  912. pci_attr->address = address;
  913. return attributes;
  914. err_pci:
  915. kfree(attributes);
  916. err_attr:
  917. return NULL;
  918. }
  919. EXPORT_SYMBOL(vme_dma_pci_attribute);
  920. /**
  921. * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
  922. * @address: VME base address for DMA transfer.
  923. * @aspace: VME address space to use for DMA transfer.
  924. * @cycle: VME bus cycle to use for DMA transfer.
  925. * @dwidth: VME data width to use for DMA transfer.
  926. *
  927. * Create VME DMA list attribute pointing to a location on the VME bus for DMA
  928. * transfers. It is the responsibility of the user to free used attributes
  929. * using vme_dma_free_attribute().
  930. *
  931. * Return: Pointer to VME DMA attribute, NULL on failure.
  932. */
  933. struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
  934. u32 aspace, u32 cycle, u32 dwidth)
  935. {
  936. struct vme_dma_attr *attributes;
  937. struct vme_dma_vme *vme_attr;
  938. attributes = kmalloc(
  939. sizeof(struct vme_dma_attr), GFP_KERNEL);
  940. if (attributes == NULL) {
  941. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  942. goto err_attr;
  943. }
  944. vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
  945. if (vme_attr == NULL) {
  946. printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
  947. goto err_vme;
  948. }
  949. attributes->type = VME_DMA_VME;
  950. attributes->private = (void *)vme_attr;
  951. vme_attr->address = address;
  952. vme_attr->aspace = aspace;
  953. vme_attr->cycle = cycle;
  954. vme_attr->dwidth = dwidth;
  955. return attributes;
  956. err_vme:
  957. kfree(attributes);
  958. err_attr:
  959. return NULL;
  960. }
  961. EXPORT_SYMBOL(vme_dma_vme_attribute);
  962. /**
  963. * vme_dma_free_attribute - Free DMA list attribute.
  964. * @attributes: Pointer to DMA list attribute.
  965. *
  966. * Free VME DMA list attribute. VME DMA list attributes can be safely freed
  967. * once vme_dma_list_add() has returned.
  968. */
  969. void vme_dma_free_attribute(struct vme_dma_attr *attributes)
  970. {
  971. kfree(attributes->private);
  972. kfree(attributes);
  973. }
  974. EXPORT_SYMBOL(vme_dma_free_attribute);
  975. /**
  976. * vme_dma_list_add - Add enty to a VME DMA list.
  977. * @list: Pointer to VME list.
  978. * @src: Pointer to DMA list attribute to use as source.
  979. * @dest: Pointer to DMA list attribute to use as destination.
  980. * @count: Number of bytes to transfer.
  981. *
  982. * Add an entry to the provided VME DMA list. Entry requires pointers to source
  983. * and destination DMA attributes and a count.
  984. *
  985. * Please note, the attributes supported as source and destinations for
  986. * transfers are hardware dependent.
  987. *
  988. * Return: Zero on success, -EINVAL if operation is not supported on this
  989. * device or if the link list has already been submitted for execution.
  990. * Hardware specific errors also possible.
  991. */
  992. int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
  993. struct vme_dma_attr *dest, size_t count)
  994. {
  995. struct vme_bridge *bridge = list->parent->parent;
  996. int retval;
  997. if (bridge->dma_list_add == NULL) {
  998. printk(KERN_WARNING "Link List DMA generation not supported\n");
  999. return -EINVAL;
  1000. }
  1001. if (!mutex_trylock(&list->mtx)) {
  1002. printk(KERN_ERR "Link List already submitted\n");
  1003. return -EINVAL;
  1004. }
  1005. retval = bridge->dma_list_add(list, src, dest, count);
  1006. mutex_unlock(&list->mtx);
  1007. return retval;
  1008. }
  1009. EXPORT_SYMBOL(vme_dma_list_add);
  1010. /**
  1011. * vme_dma_list_exec - Queue a VME DMA list for execution.
  1012. * @list: Pointer to VME list.
  1013. *
  1014. * Queue the provided VME DMA list for execution. The call will return once the
  1015. * list has been executed.
  1016. *
  1017. * Return: Zero on success, -EINVAL if operation is not supported on this
  1018. * device. Hardware specific errors also possible.
  1019. */
  1020. int vme_dma_list_exec(struct vme_dma_list *list)
  1021. {
  1022. struct vme_bridge *bridge = list->parent->parent;
  1023. int retval;
  1024. if (bridge->dma_list_exec == NULL) {
  1025. printk(KERN_ERR "Link List DMA execution not supported\n");
  1026. return -EINVAL;
  1027. }
  1028. mutex_lock(&list->mtx);
  1029. retval = bridge->dma_list_exec(list);
  1030. mutex_unlock(&list->mtx);
  1031. return retval;
  1032. }
  1033. EXPORT_SYMBOL(vme_dma_list_exec);
  1034. /**
  1035. * vme_dma_list_free - Free a VME DMA list.
  1036. * @list: Pointer to VME list.
  1037. *
  1038. * Free the provided DMA list and all its entries.
  1039. *
  1040. * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
  1041. * is still in use. Hardware specific errors also possible.
  1042. */
  1043. int vme_dma_list_free(struct vme_dma_list *list)
  1044. {
  1045. struct vme_bridge *bridge = list->parent->parent;
  1046. int retval;
  1047. if (bridge->dma_list_empty == NULL) {
  1048. printk(KERN_WARNING "Emptying of Link Lists not supported\n");
  1049. return -EINVAL;
  1050. }
  1051. if (!mutex_trylock(&list->mtx)) {
  1052. printk(KERN_ERR "Link List in use\n");
  1053. return -EINVAL;
  1054. }
  1055. /*
  1056. * Empty out all of the entries from the DMA list. We need to go to the
  1057. * low level driver as DMA entries are driver specific.
  1058. */
  1059. retval = bridge->dma_list_empty(list);
  1060. if (retval) {
  1061. printk(KERN_ERR "Unable to empty link-list entries\n");
  1062. mutex_unlock(&list->mtx);
  1063. return retval;
  1064. }
  1065. mutex_unlock(&list->mtx);
  1066. kfree(list);
  1067. return retval;
  1068. }
  1069. EXPORT_SYMBOL(vme_dma_list_free);
  1070. /**
  1071. * vme_dma_free - Free a VME DMA resource.
  1072. * @resource: Pointer to VME DMA resource.
  1073. *
  1074. * Free the provided DMA resource so that it may be reallocated.
  1075. *
  1076. * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
  1077. * is still active.
  1078. */
  1079. int vme_dma_free(struct vme_resource *resource)
  1080. {
  1081. struct vme_dma_resource *ctrlr;
  1082. if (resource->type != VME_DMA) {
  1083. printk(KERN_ERR "Not a DMA resource\n");
  1084. return -EINVAL;
  1085. }
  1086. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  1087. if (!mutex_trylock(&ctrlr->mtx)) {
  1088. printk(KERN_ERR "Resource busy, can't free\n");
  1089. return -EBUSY;
  1090. }
  1091. if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
  1092. printk(KERN_WARNING "Resource still processing transfers\n");
  1093. mutex_unlock(&ctrlr->mtx);
  1094. return -EBUSY;
  1095. }
  1096. ctrlr->locked = 0;
  1097. mutex_unlock(&ctrlr->mtx);
  1098. kfree(resource);
  1099. return 0;
  1100. }
  1101. EXPORT_SYMBOL(vme_dma_free);
  1102. void vme_bus_error_handler(struct vme_bridge *bridge,
  1103. unsigned long long address, int am)
  1104. {
  1105. struct list_head *handler_pos = NULL;
  1106. struct vme_error_handler *handler;
  1107. int handler_triggered = 0;
  1108. u32 aspace = vme_get_aspace(am);
  1109. list_for_each(handler_pos, &bridge->vme_error_handlers) {
  1110. handler = list_entry(handler_pos, struct vme_error_handler,
  1111. list);
  1112. if ((aspace == handler->aspace) &&
  1113. (address >= handler->start) &&
  1114. (address < handler->end)) {
  1115. if (!handler->num_errors)
  1116. handler->first_error = address;
  1117. if (handler->num_errors != UINT_MAX)
  1118. handler->num_errors++;
  1119. handler_triggered = 1;
  1120. }
  1121. }
  1122. if (!handler_triggered)
  1123. dev_err(bridge->parent,
  1124. "Unhandled VME access error at address 0x%llx\n",
  1125. address);
  1126. }
  1127. EXPORT_SYMBOL(vme_bus_error_handler);
  1128. struct vme_error_handler *vme_register_error_handler(
  1129. struct vme_bridge *bridge, u32 aspace,
  1130. unsigned long long address, size_t len)
  1131. {
  1132. struct vme_error_handler *handler;
  1133. handler = kmalloc(sizeof(*handler), GFP_KERNEL);
  1134. if (!handler)
  1135. return NULL;
  1136. handler->aspace = aspace;
  1137. handler->start = address;
  1138. handler->end = address + len;
  1139. handler->num_errors = 0;
  1140. handler->first_error = 0;
  1141. list_add_tail(&handler->list, &bridge->vme_error_handlers);
  1142. return handler;
  1143. }
  1144. EXPORT_SYMBOL(vme_register_error_handler);
/* Detach a previously registered error handler from its bridge and free it. */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
  1151. void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
  1152. {
  1153. void (*call)(int, int, void *);
  1154. void *priv_data;
  1155. call = bridge->irq[level - 1].callback[statid].func;
  1156. priv_data = bridge->irq[level - 1].callback[statid].priv_data;
  1157. if (call != NULL)
  1158. call(level, statid, priv_data);
  1159. else
  1160. printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
  1161. level, statid);
  1162. }
  1163. EXPORT_SYMBOL(vme_irq_handler);
  1164. /**
  1165. * vme_irq_request - Request a specific VME interrupt.
  1166. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1167. * @level: Interrupt priority being requested.
  1168. * @statid: Interrupt vector being requested.
  1169. * @callback: Pointer to callback function called when VME interrupt/vector
  1170. * received.
  1171. * @priv_data: Generic pointer that will be passed to the callback function.
  1172. *
  1173. * Request callback to be attached as a handler for VME interrupts with provided
  1174. * level and statid.
  1175. *
  1176. * Return: Zero on success, -EINVAL on invalid vme device, level or if the
  1177. * function is not supported, -EBUSY if the level/statid combination is
  1178. * already in use. Hardware specific errors also possible.
  1179. */
  1180. int vme_irq_request(struct vme_dev *vdev, int level, int statid,
  1181. void (*callback)(int, int, void *),
  1182. void *priv_data)
  1183. {
  1184. struct vme_bridge *bridge;
  1185. bridge = vdev->bridge;
  1186. if (bridge == NULL) {
  1187. printk(KERN_ERR "Can't find VME bus\n");
  1188. return -EINVAL;
  1189. }
  1190. if ((level < 1) || (level > 7)) {
  1191. printk(KERN_ERR "Invalid interrupt level\n");
  1192. return -EINVAL;
  1193. }
  1194. if (bridge->irq_set == NULL) {
  1195. printk(KERN_ERR "Configuring interrupts not supported\n");
  1196. return -EINVAL;
  1197. }
  1198. mutex_lock(&bridge->irq_mtx);
  1199. if (bridge->irq[level - 1].callback[statid].func) {
  1200. mutex_unlock(&bridge->irq_mtx);
  1201. printk(KERN_WARNING "VME Interrupt already taken\n");
  1202. return -EBUSY;
  1203. }
  1204. bridge->irq[level - 1].count++;
  1205. bridge->irq[level - 1].callback[statid].priv_data = priv_data;
  1206. bridge->irq[level - 1].callback[statid].func = callback;
  1207. /* Enable IRQ level */
  1208. bridge->irq_set(bridge, level, 1, 1);
  1209. mutex_unlock(&bridge->irq_mtx);
  1210. return 0;
  1211. }
  1212. EXPORT_SYMBOL(vme_irq_request);
  1213. /**
  1214. * vme_irq_free - Free a VME interrupt.
  1215. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1216. * @level: Interrupt priority of interrupt being freed.
  1217. * @statid: Interrupt vector of interrupt being freed.
  1218. *
  1219. * Remove previously attached callback from VME interrupt priority/vector.
  1220. */
  1221. void vme_irq_free(struct vme_dev *vdev, int level, int statid)
  1222. {
  1223. struct vme_bridge *bridge;
  1224. bridge = vdev->bridge;
  1225. if (bridge == NULL) {
  1226. printk(KERN_ERR "Can't find VME bus\n");
  1227. return;
  1228. }
  1229. if ((level < 1) || (level > 7)) {
  1230. printk(KERN_ERR "Invalid interrupt level\n");
  1231. return;
  1232. }
  1233. if (bridge->irq_set == NULL) {
  1234. printk(KERN_ERR "Configuring interrupts not supported\n");
  1235. return;
  1236. }
  1237. mutex_lock(&bridge->irq_mtx);
  1238. bridge->irq[level - 1].count--;
  1239. /* Disable IRQ level if no more interrupts attached at this level*/
  1240. if (bridge->irq[level - 1].count == 0)
  1241. bridge->irq_set(bridge, level, 0, 1);
  1242. bridge->irq[level - 1].callback[statid].func = NULL;
  1243. bridge->irq[level - 1].callback[statid].priv_data = NULL;
  1244. mutex_unlock(&bridge->irq_mtx);
  1245. }
  1246. EXPORT_SYMBOL(vme_irq_free);
  1247. /**
  1248. * vme_irq_generate - Generate VME interrupt.
  1249. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1250. * @level: Interrupt priority at which to assert the interrupt.
  1251. * @statid: Interrupt vector to associate with the interrupt.
  1252. *
  1253. * Generate a VME interrupt of the provided level and with the provided
  1254. * statid.
  1255. *
  1256. * Return: Zero on success, -EINVAL on invalid vme device, level or if the
  1257. * function is not supported. Hardware specific errors also possible.
  1258. */
  1259. int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
  1260. {
  1261. struct vme_bridge *bridge;
  1262. bridge = vdev->bridge;
  1263. if (bridge == NULL) {
  1264. printk(KERN_ERR "Can't find VME bus\n");
  1265. return -EINVAL;
  1266. }
  1267. if ((level < 1) || (level > 7)) {
  1268. printk(KERN_WARNING "Invalid interrupt level\n");
  1269. return -EINVAL;
  1270. }
  1271. if (bridge->irq_generate == NULL) {
  1272. printk(KERN_WARNING "Interrupt generation not supported\n");
  1273. return -EINVAL;
  1274. }
  1275. return bridge->irq_generate(bridge, level, statid);
  1276. }
  1277. EXPORT_SYMBOL(vme_irq_generate);
  1278. /**
  1279. * vme_lm_request - Request a VME location monitor
  1280. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1281. *
  1282. * Allocate a location monitor resource to the driver. A location monitor
  1283. * allows the driver to monitor accesses to a contiguous number of
  1284. * addresses on the VME bus.
  1285. *
  1286. * Return: Pointer to a VME resource on success or NULL on failure.
  1287. */
  1288. struct vme_resource *vme_lm_request(struct vme_dev *vdev)
  1289. {
  1290. struct vme_bridge *bridge;
  1291. struct list_head *lm_pos = NULL;
  1292. struct vme_lm_resource *allocated_lm = NULL;
  1293. struct vme_lm_resource *lm = NULL;
  1294. struct vme_resource *resource = NULL;
  1295. bridge = vdev->bridge;
  1296. if (bridge == NULL) {
  1297. printk(KERN_ERR "Can't find VME bus\n");
  1298. goto err_bus;
  1299. }
  1300. /* Loop through LM resources */
  1301. list_for_each(lm_pos, &bridge->lm_resources) {
  1302. lm = list_entry(lm_pos,
  1303. struct vme_lm_resource, list);
  1304. if (lm == NULL) {
  1305. printk(KERN_ERR "Registered NULL Location Monitor resource\n");
  1306. continue;
  1307. }
  1308. /* Find an unlocked controller */
  1309. mutex_lock(&lm->mtx);
  1310. if (lm->locked == 0) {
  1311. lm->locked = 1;
  1312. mutex_unlock(&lm->mtx);
  1313. allocated_lm = lm;
  1314. break;
  1315. }
  1316. mutex_unlock(&lm->mtx);
  1317. }
  1318. /* Check to see if we found a resource */
  1319. if (allocated_lm == NULL)
  1320. goto err_lm;
  1321. resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
  1322. if (resource == NULL) {
  1323. printk(KERN_ERR "Unable to allocate resource structure\n");
  1324. goto err_alloc;
  1325. }
  1326. resource->type = VME_LM;
  1327. resource->entry = &allocated_lm->list;
  1328. return resource;
  1329. err_alloc:
  1330. /* Unlock image */
  1331. mutex_lock(&lm->mtx);
  1332. lm->locked = 0;
  1333. mutex_unlock(&lm->mtx);
  1334. err_lm:
  1335. err_bus:
  1336. return NULL;
  1337. }
  1338. EXPORT_SYMBOL(vme_lm_request);
  1339. /**
  1340. * vme_lm_count - Determine number of VME Addresses monitored
  1341. * @resource: Pointer to VME location monitor resource.
  1342. *
  1343. * The number of contiguous addresses monitored is hardware dependent.
  1344. * Return the number of contiguous addresses monitored by the
  1345. * location monitor.
  1346. *
  1347. * Return: Count of addresses monitored or -EINVAL when provided with an
  1348. * invalid location monitor resource.
  1349. */
  1350. int vme_lm_count(struct vme_resource *resource)
  1351. {
  1352. struct vme_lm_resource *lm;
  1353. if (resource->type != VME_LM) {
  1354. printk(KERN_ERR "Not a Location Monitor resource\n");
  1355. return -EINVAL;
  1356. }
  1357. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1358. return lm->monitors;
  1359. }
  1360. EXPORT_SYMBOL(vme_lm_count);
  1361. /**
  1362. * vme_lm_set - Configure location monitor
  1363. * @resource: Pointer to VME location monitor resource.
  1364. * @lm_base: Base address to monitor.
  1365. * @aspace: VME address space to monitor.
  1366. * @cycle: VME bus cycle type to monitor.
  1367. *
  1368. * Set the base address, address space and cycle type of accesses to be
  1369. * monitored by the location monitor.
  1370. *
  1371. * Return: Zero on success, -EINVAL when provided with an invalid location
  1372. * monitor resource or function is not supported. Hardware specific
  1373. * errors may also be returned.
  1374. */
  1375. int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
  1376. u32 aspace, u32 cycle)
  1377. {
  1378. struct vme_bridge *bridge = find_bridge(resource);
  1379. struct vme_lm_resource *lm;
  1380. if (resource->type != VME_LM) {
  1381. printk(KERN_ERR "Not a Location Monitor resource\n");
  1382. return -EINVAL;
  1383. }
  1384. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1385. if (bridge->lm_set == NULL) {
  1386. printk(KERN_ERR "vme_lm_set not supported\n");
  1387. return -EINVAL;
  1388. }
  1389. return bridge->lm_set(lm, lm_base, aspace, cycle);
  1390. }
  1391. EXPORT_SYMBOL(vme_lm_set);
  1392. /**
  1393. * vme_lm_get - Retrieve location monitor settings
  1394. * @resource: Pointer to VME location monitor resource.
  1395. * @lm_base: Pointer used to output the base address monitored.
  1396. * @aspace: Pointer used to output the address space monitored.
  1397. * @cycle: Pointer used to output the VME bus cycle type monitored.
  1398. *
  1399. * Retrieve the base address, address space and cycle type of accesses to
  1400. * be monitored by the location monitor.
  1401. *
  1402. * Return: Zero on success, -EINVAL when provided with an invalid location
  1403. * monitor resource or function is not supported. Hardware specific
  1404. * errors may also be returned.
  1405. */
  1406. int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
  1407. u32 *aspace, u32 *cycle)
  1408. {
  1409. struct vme_bridge *bridge = find_bridge(resource);
  1410. struct vme_lm_resource *lm;
  1411. if (resource->type != VME_LM) {
  1412. printk(KERN_ERR "Not a Location Monitor resource\n");
  1413. return -EINVAL;
  1414. }
  1415. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1416. if (bridge->lm_get == NULL) {
  1417. printk(KERN_ERR "vme_lm_get not supported\n");
  1418. return -EINVAL;
  1419. }
  1420. return bridge->lm_get(lm, lm_base, aspace, cycle);
  1421. }
  1422. EXPORT_SYMBOL(vme_lm_get);
  1423. /**
  1424. * vme_lm_attach - Provide callback for location monitor address
  1425. * @resource: Pointer to VME location monitor resource.
  1426. * @monitor: Offset to which callback should be attached.
  1427. * @callback: Pointer to callback function called when triggered.
  1428. * @data: Generic pointer that will be passed to the callback function.
  1429. *
  1430. * Attach a callback to the specificed offset into the location monitors
  1431. * monitored addresses. A generic pointer is provided to allow data to be
  1432. * passed to the callback when called.
  1433. *
  1434. * Return: Zero on success, -EINVAL when provided with an invalid location
  1435. * monitor resource or function is not supported. Hardware specific
  1436. * errors may also be returned.
  1437. */
  1438. int vme_lm_attach(struct vme_resource *resource, int monitor,
  1439. void (*callback)(void *), void *data)
  1440. {
  1441. struct vme_bridge *bridge = find_bridge(resource);
  1442. struct vme_lm_resource *lm;
  1443. if (resource->type != VME_LM) {
  1444. printk(KERN_ERR "Not a Location Monitor resource\n");
  1445. return -EINVAL;
  1446. }
  1447. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1448. if (bridge->lm_attach == NULL) {
  1449. printk(KERN_ERR "vme_lm_attach not supported\n");
  1450. return -EINVAL;
  1451. }
  1452. return bridge->lm_attach(lm, monitor, callback, data);
  1453. }
  1454. EXPORT_SYMBOL(vme_lm_attach);
  1455. /**
  1456. * vme_lm_detach - Remove callback for location monitor address
  1457. * @resource: Pointer to VME location monitor resource.
  1458. * @monitor: Offset to which callback should be removed.
  1459. *
  1460. * Remove the callback associated with the specificed offset into the
  1461. * location monitors monitored addresses.
  1462. *
  1463. * Return: Zero on success, -EINVAL when provided with an invalid location
  1464. * monitor resource or function is not supported. Hardware specific
  1465. * errors may also be returned.
  1466. */
  1467. int vme_lm_detach(struct vme_resource *resource, int monitor)
  1468. {
  1469. struct vme_bridge *bridge = find_bridge(resource);
  1470. struct vme_lm_resource *lm;
  1471. if (resource->type != VME_LM) {
  1472. printk(KERN_ERR "Not a Location Monitor resource\n");
  1473. return -EINVAL;
  1474. }
  1475. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1476. if (bridge->lm_detach == NULL) {
  1477. printk(KERN_ERR "vme_lm_detach not supported\n");
  1478. return -EINVAL;
  1479. }
  1480. return bridge->lm_detach(lm, monitor);
  1481. }
  1482. EXPORT_SYMBOL(vme_lm_detach);
  1483. /**
  1484. * vme_lm_free - Free allocated VME location monitor
  1485. * @resource: Pointer to VME location monitor resource.
  1486. *
  1487. * Free allocation of a VME location monitor.
  1488. *
  1489. * WARNING: This function currently expects that any callbacks that have
  1490. * been attached to the location monitor have been removed.
  1491. *
  1492. * Return: Zero on success, -EINVAL when provided with an invalid location
  1493. * monitor resource.
  1494. */
  1495. void vme_lm_free(struct vme_resource *resource)
  1496. {
  1497. struct vme_lm_resource *lm;
  1498. if (resource->type != VME_LM) {
  1499. printk(KERN_ERR "Not a Location Monitor resource\n");
  1500. return;
  1501. }
  1502. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1503. mutex_lock(&lm->mtx);
  1504. /* XXX
  1505. * Check to see that there aren't any callbacks still attached, if
  1506. * there are we should probably be detaching them!
  1507. */
  1508. lm->locked = 0;
  1509. mutex_unlock(&lm->mtx);
  1510. kfree(resource);
  1511. }
  1512. EXPORT_SYMBOL(vme_lm_free);
  1513. /**
  1514. * vme_slot_num - Retrieve slot ID
  1515. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1516. *
  1517. * Retrieve the slot ID associated with the provided VME device.
  1518. *
  1519. * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
  1520. * or the function is not supported. Hardware specific errors may also
  1521. * be returned.
  1522. */
  1523. int vme_slot_num(struct vme_dev *vdev)
  1524. {
  1525. struct vme_bridge *bridge;
  1526. bridge = vdev->bridge;
  1527. if (bridge == NULL) {
  1528. printk(KERN_ERR "Can't find VME bus\n");
  1529. return -EINVAL;
  1530. }
  1531. if (bridge->slot_get == NULL) {
  1532. printk(KERN_WARNING "vme_slot_num not supported\n");
  1533. return -EINVAL;
  1534. }
  1535. return bridge->slot_get(bridge);
  1536. }
  1537. EXPORT_SYMBOL(vme_slot_num);
  1538. /**
  1539. * vme_bus_num - Retrieve bus number
  1540. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1541. *
  1542. * Retrieve the bus enumeration associated with the provided VME device.
  1543. *
  1544. * Return: The bus number on success, -EINVAL if VME bridge cannot be
  1545. * determined.
  1546. */
  1547. int vme_bus_num(struct vme_dev *vdev)
  1548. {
  1549. struct vme_bridge *bridge;
  1550. bridge = vdev->bridge;
  1551. if (bridge == NULL) {
  1552. pr_err("Can't find VME bus\n");
  1553. return -EINVAL;
  1554. }
  1555. return bridge->num;
  1556. }
  1557. EXPORT_SYMBOL(vme_bus_num);
  1558. /* - Bridge Registration --------------------------------------------------- */
  1559. static void vme_dev_release(struct device *dev)
  1560. {
  1561. kfree(dev_to_vme_dev(dev));
  1562. }
  1563. /* Common bridge initialization */
  1564. struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
  1565. {
  1566. INIT_LIST_HEAD(&bridge->vme_error_handlers);
  1567. INIT_LIST_HEAD(&bridge->master_resources);
  1568. INIT_LIST_HEAD(&bridge->slave_resources);
  1569. INIT_LIST_HEAD(&bridge->dma_resources);
  1570. INIT_LIST_HEAD(&bridge->lm_resources);
  1571. mutex_init(&bridge->irq_mtx);
  1572. return bridge;
  1573. }
  1574. EXPORT_SYMBOL(vme_init_bridge);
  1575. int vme_register_bridge(struct vme_bridge *bridge)
  1576. {
  1577. int i;
  1578. int ret = -1;
  1579. mutex_lock(&vme_buses_lock);
  1580. for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
  1581. if ((vme_bus_numbers & (1 << i)) == 0) {
  1582. vme_bus_numbers |= (1 << i);
  1583. bridge->num = i;
  1584. INIT_LIST_HEAD(&bridge->devices);
  1585. list_add_tail(&bridge->bus_list, &vme_bus_list);
  1586. ret = 0;
  1587. break;
  1588. }
  1589. }
  1590. mutex_unlock(&vme_buses_lock);
  1591. return ret;
  1592. }
  1593. EXPORT_SYMBOL(vme_register_bridge);
  1594. void vme_unregister_bridge(struct vme_bridge *bridge)
  1595. {
  1596. struct vme_dev *vdev;
  1597. struct vme_dev *tmp;
  1598. mutex_lock(&vme_buses_lock);
  1599. vme_bus_numbers &= ~(1 << bridge->num);
  1600. list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
  1601. list_del(&vdev->drv_list);
  1602. list_del(&vdev->bridge_list);
  1603. device_unregister(&vdev->dev);
  1604. }
  1605. list_del(&bridge->bus_list);
  1606. mutex_unlock(&vme_buses_lock);
  1607. }
  1608. EXPORT_SYMBOL(vme_unregister_bridge);
  1609. /* - Driver Registration --------------------------------------------------- */
  1610. static int __vme_register_driver_bus(struct vme_driver *drv,
  1611. struct vme_bridge *bridge, unsigned int ndevs)
  1612. {
  1613. int err;
  1614. unsigned int i;
  1615. struct vme_dev *vdev;
  1616. struct vme_dev *tmp;
  1617. for (i = 0; i < ndevs; i++) {
  1618. vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
  1619. if (!vdev) {
  1620. err = -ENOMEM;
  1621. goto err_devalloc;
  1622. }
  1623. vdev->num = i;
  1624. vdev->bridge = bridge;
  1625. vdev->dev.platform_data = drv;
  1626. vdev->dev.release = vme_dev_release;
  1627. vdev->dev.parent = bridge->parent;
  1628. vdev->dev.bus = &vme_bus_type;
  1629. dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
  1630. vdev->num);
  1631. err = device_register(&vdev->dev);
  1632. if (err)
  1633. goto err_reg;
  1634. if (vdev->dev.platform_data) {
  1635. list_add_tail(&vdev->drv_list, &drv->devices);
  1636. list_add_tail(&vdev->bridge_list, &bridge->devices);
  1637. } else
  1638. device_unregister(&vdev->dev);
  1639. }
  1640. return 0;
  1641. err_reg:
  1642. put_device(&vdev->dev);
  1643. kfree(vdev);
  1644. err_devalloc:
  1645. list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
  1646. list_del(&vdev->drv_list);
  1647. list_del(&vdev->bridge_list);
  1648. device_unregister(&vdev->dev);
  1649. }
  1650. return err;
  1651. }
  1652. static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1653. {
  1654. struct vme_bridge *bridge;
  1655. int err = 0;
  1656. mutex_lock(&vme_buses_lock);
  1657. list_for_each_entry(bridge, &vme_bus_list, bus_list) {
  1658. /*
  1659. * This cannot cause trouble as we already have vme_buses_lock
  1660. * and if the bridge is removed, it will have to go through
  1661. * vme_unregister_bridge() to do it (which calls remove() on
  1662. * the bridge which in turn tries to acquire vme_buses_lock and
  1663. * will have to wait).
  1664. */
  1665. err = __vme_register_driver_bus(drv, bridge, ndevs);
  1666. if (err)
  1667. break;
  1668. }
  1669. mutex_unlock(&vme_buses_lock);
  1670. return err;
  1671. }
  1672. /**
  1673. * vme_register_driver - Register a VME driver
  1674. * @drv: Pointer to VME driver structure to register.
  1675. * @ndevs: Maximum number of devices to allow to be enumerated.
  1676. *
  1677. * Register a VME device driver with the VME subsystem.
  1678. *
  1679. * Return: Zero on success, error value on registration failure.
  1680. */
  1681. int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1682. {
  1683. int err;
  1684. drv->driver.name = drv->name;
  1685. drv->driver.bus = &vme_bus_type;
  1686. INIT_LIST_HEAD(&drv->devices);
  1687. err = driver_register(&drv->driver);
  1688. if (err)
  1689. return err;
  1690. err = __vme_register_driver(drv, ndevs);
  1691. if (err)
  1692. driver_unregister(&drv->driver);
  1693. return err;
  1694. }
  1695. EXPORT_SYMBOL(vme_register_driver);
  1696. /**
  1697. * vme_unregister_driver - Unregister a VME driver
  1698. * @drv: Pointer to VME driver structure to unregister.
  1699. *
  1700. * Unregister a VME device driver from the VME subsystem.
  1701. */
  1702. void vme_unregister_driver(struct vme_driver *drv)
  1703. {
  1704. struct vme_dev *dev, *dev_tmp;
  1705. mutex_lock(&vme_buses_lock);
  1706. list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
  1707. list_del(&dev->drv_list);
  1708. list_del(&dev->bridge_list);
  1709. device_unregister(&dev->dev);
  1710. }
  1711. mutex_unlock(&vme_buses_lock);
  1712. driver_unregister(&drv->driver);
  1713. }
  1714. EXPORT_SYMBOL(vme_unregister_driver);
  1715. /* - Bus Registration ------------------------------------------------------ */
  1716. static int vme_bus_match(struct device *dev, struct device_driver *drv)
  1717. {
  1718. struct vme_driver *vme_drv;
  1719. vme_drv = container_of(drv, struct vme_driver, driver);
  1720. if (dev->platform_data == vme_drv) {
  1721. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1722. if (vme_drv->match && vme_drv->match(vdev))
  1723. return 1;
  1724. dev->platform_data = NULL;
  1725. }
  1726. return 0;
  1727. }
  1728. static int vme_bus_probe(struct device *dev)
  1729. {
  1730. int retval = -ENODEV;
  1731. struct vme_driver *driver;
  1732. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1733. driver = dev->platform_data;
  1734. if (driver->probe != NULL)
  1735. retval = driver->probe(vdev);
  1736. return retval;
  1737. }
  1738. static int vme_bus_remove(struct device *dev)
  1739. {
  1740. int retval = -ENODEV;
  1741. struct vme_driver *driver;
  1742. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1743. driver = dev->platform_data;
  1744. if (driver->remove != NULL)
  1745. retval = driver->remove(vdev);
  1746. return retval;
  1747. }
/* VME bus type: ties VME devices to VME drivers via the callbacks above. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/* Register the VME bus type with the driver core during boot. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);