fw-cdev.c

/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);

struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct client_resource resource;
        struct fw_cdev_event_response response;
};

struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head link;
        struct kref kref;
};

static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}

static inline void __user *u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}
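
/*
 * Userspace passes pointers in __u64 fields (see struct fw_cdev_get_info
 * and friends in <linux/firewire-cdev.h>) so that a 32-bit process works
 * unchanged against a 64-bit kernel via the compat_ioctl path; the two
 * helpers above undo and redo that packing.  A caller would fill such a
 * field roughly like this (illustrative sketch, not part of this file):
 *
 *     __u32 rom[64];
 *     struct fw_cdev_get_info info = {
 *             .rom        = (__u64)(uintptr_t)rom,
 *             .rom_length = sizeof(rom),
 *     };
 */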
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        kref_init(&client->kref);

        file->private_data = client;

        mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
        mutex_unlock(&device->client_list_mutex);

        return 0;
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
            fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irqsave(&client->lock, flags);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irqrestore(&client->lock, flags);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}
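
/*
 * Each read() returns at most one event, so a client typically waits with
 * poll() and then demultiplexes on the common type field.  A userspace
 * event loop might look like this (sketch; FW_CDEV_EVENT_* layouts come
 * from <linux/firewire-cdev.h>):
 *
 *     struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *     __u8 buf[16 * 1024];
 *
 *     poll(&pfd, 1, -1);
 *     ssize_t len = read(fd, buf, sizeof(buf));
 *     if (len >= (ssize_t)sizeof(struct fw_cdev_event_common))
 *             switch (((struct fw_cdev_event_common *)buf)->type) {
 *             case FW_CDEV_EVENT_BUS_RESET:     ... break;
 *             case FW_CDEV_EVENT_RESPONSE:      ... break;
 *             case FW_CDEV_EVENT_REQUEST:       ... break;
 *             case FW_CDEV_EVENT_ISO_INTERRUPT: ... break;
 *             }
 */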
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irqrestore(&card->lock, flags);
}

static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}

static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset *bus_reset;

        bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL);
        if (bus_reset == NULL) {
                fw_notify("Out of memory when allocating bus reset event\n");
                return;
        }

        fill_bus_reset_event(&bus_reset->reset, client);

        queue_event(client, &bus_reset->event,
                    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
        struct fw_cdev_get_info *get_info = buffer;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = get_info->version;
        get_info->version = FW_CDEV_VERSION;
        get_info->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (get_info->rom != 0) {
                void __user *uptr = u64_to_uptr(get_info->rom);
                size_t want = get_info->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(uptr, client->device->config_rom,
                                   min(want, have));
        }
        get_info->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = get_info->bus_reset_closure;
        if (get_info->bus_reset != 0) {
                void __user *uptr = u64_to_uptr(get_info->bus_reset);

                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        return 0;
}

static int add_client_resource(struct client *client,
                               struct client_resource *resource,
                               gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0)
                client_get(client);
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}
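
/*
 * The idr_pre_get()/idr_get_new() pair above is the classic two-phase idr
 * idiom: preallocate outside the spinlock (allocation with gfp_mask may
 * sleep, which is forbidden under client->lock), then take the handle
 * under the lock, and loop on -EAGAIN if the preallocated node was
 * consumed by a concurrent allocation in between.
 */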
static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **resource)
{
        struct client_resource *r;
        unsigned long flags;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                r = NULL;
        else
                r = idr_find(&client->resource_idr, handle);
        if (r && r->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irqrestore(&client->lock, flags);

        if (!(r && r->release == release))
                return -EINVAL;

        if (resource)
                *resource = r;
        else
                r->release(client, r);

        client_put(client);

        return 0;
}

static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
        struct response *response =
                container_of(resource, struct response, resource);

        fw_cancel_transaction(client->device->card, &response->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;
        unsigned long flags;
        struct fw_cdev_event_response *r = &response->response;

        if (length < r->length)
                r->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(r->data, payload, r->length);

        spin_lock_irqsave(&client->lock, flags);
        /*
         * 1. If called while in shutdown, the idr tree must be left untouched.
         *    The idr handle will be removed and the client reference will be
         *    dropped later.
         * 2. If the call chain was release_client_resource ->
         *    release_transaction -> complete_transaction (instead of a normal
         *    conclusion of the transaction), i.e. if this resource was already
         *    unregistered from the idr, the client reference will be dropped
         *    by release_client_resource and we must not drop it here.
         */
        if (!client->in_shutdown &&
            idr_find(&client->resource_idr, response->resource.handle)) {
                idr_remove(&client->resource_idr, response->resource.handle);
                /* Drop the idr's reference */
                client_put(client);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        r->type  = FW_CDEV_EVENT_RESPONSE;
        r->rcode = rcode;

        /*
         * In the case that sizeof(*r) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
                queue_event(client, &response->event, r, sizeof(*r),
                            r->data, r->length);
        else
                queue_event(client, &response->event, r,
                            sizeof(*r) + r->length, NULL, 0);

        /* Drop the transaction callback's reference */
        client_put(client);
}

static int ioctl_send_request(struct client *client, void *buffer)
{
        struct fw_device *device = client->device;
        struct fw_cdev_send_request *request = buffer;
        struct response *response;
        int ret;

        /* What is the biggest size we'll accept, really? */
        if (request->length > 4096)
                return -EINVAL;

        response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
        if (response == NULL)
                return -ENOMEM;

        response->client = client;
        response->response.length = request->length;
        response->response.closure = request->closure;

        if (request->data &&
            copy_from_user(response->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        switch (request->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                ret = -EINVAL;
                goto failed;
        }

        response->resource.release = release_transaction;
        ret = add_client_resource(client, &response->resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        /* Get a reference for the transaction callback */
        client_get(client);

        fw_send_request(device->card, &response->transaction,
                        request->tcode & 0x1f,
                        device->node->node_id,
                        request->generation,
                        device->max_speed,
                        request->offset,
                        response->response.data, request->length,
                        complete_transaction, response);

        if (request->data)
                return sizeof(request) + request->length;
        else
                return sizeof(request);

 failed:
        kfree(response);

        return ret;
}
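
/*
 * Example (userspace sketch; FW_CDEV_IOC_SEND_REQUEST and the request
 * struct come from <linux/firewire-cdev.h>, the generation from a prior
 * bus reset event): read one quadlet of the device's config ROM as an
 * async transaction, then pick the FW_CDEV_EVENT_RESPONSE up with read().
 *
 *     struct fw_cdev_send_request req = {
 *             .tcode      = 0x4,               // TCODE_READ_QUADLET_REQUEST
 *             .length     = 4,
 *             .offset     = 0xfffff0000400ULL, // first config ROM quadlet
 *             .closure    = 0,                 // echoed back in the response
 *             .generation = generation,
 *     };
 *     ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &req);
 */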
struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
        struct client_resource resource;
};

struct request {
        struct fw_request *request;
        void *data;
        size_t length;
        struct client_resource resource;
};

struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};

static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct request *request =
                container_of(resource, struct request, resource);

        fw_send_response(client->device->card, request->request,
                         RCODE_CONFLICT_ERROR);
        kfree(request);
}

static void handle_request(struct fw_card *card, struct fw_request *r,
                           int tcode, int destination, int source,
                           int generation, int speed,
                           unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        struct client *client = handler->client;
        int ret;

        request = kmalloc(sizeof(*request), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (request == NULL || e == NULL)
                goto failed;

        request->request = r;
        request->data    = payload;
        request->length  = length;

        request->resource.release = release_request;
        ret = add_client_resource(client, &request->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        e->request.type    = FW_CDEV_EVENT_REQUEST;
        e->request.tcode   = tcode;
        e->request.offset  = offset;
        e->request.length  = length;
        e->request.handle  = request->resource.handle;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof(e->request), payload, length);
        return;

 failed:
        kfree(request);
        kfree(e);
        fw_send_response(card, r, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler *handler =
                container_of(resource, struct address_handler, resource);

        fw_core_remove_address_handler(&handler->handler);
        kfree(handler);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
        struct fw_cdev_allocate *request = buffer;
        struct address_handler *handler;
        struct fw_address_region region;
        int ret;

        handler = kmalloc(sizeof(*handler), GFP_KERNEL);
        if (handler == NULL)
                return -ENOMEM;

        region.start = request->offset;
        region.end   = request->offset + request->length;
        handler->handler.length = request->length;
        handler->handler.address_callback = handle_request;
        handler->handler.callback_data = handler;
        handler->closure = request->closure;
        handler->client  = client;

        ret = fw_core_add_address_handler(&handler->handler, &region);
        if (ret < 0) {
                kfree(handler);
                return ret;
        }

        handler->resource.release = release_address_handler;
        ret = add_client_resource(client, &handler->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &handler->resource);
                return ret;
        }
        request->handle = handler->resource.handle;

        return 0;
}
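
/*
 * Example (userspace sketch): claim an address range, then answer inbound
 * requests.  After FW_CDEV_IOC_ALLOCATE succeeds, each incoming transaction
 * arrives as an FW_CDEV_EVENT_REQUEST on read(); its handle must be fed
 * back through FW_CDEV_IOC_SEND_RESPONSE exactly once.  The offset below is
 * an arbitrary example address; my_ctx is a hypothetical caller cookie.
 *
 *     struct fw_cdev_allocate alloc = {
 *             .offset  = 0xffffe0000000ULL,
 *             .length  = 0x1000,
 *             .closure = (__u64)(uintptr_t)my_ctx,
 *     };
 *     ioctl(fd, FW_CDEV_IOC_ALLOCATE, &alloc);
 *     ...
 *     struct fw_cdev_send_response resp = {
 *             .rcode  = 0x0,            // RCODE_COMPLETE
 *             .handle = event->handle,  // from the request event
 *     };
 *     ioctl(fd, FW_CDEV_IOC_SEND_RESPONSE, &resp);
 */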
static int ioctl_deallocate(struct client *client, void *buffer)
{
        struct fw_cdev_deallocate *request = buffer;

        return release_client_resource(client, request->handle,
                                       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
        struct fw_cdev_send_response *request = buffer;
        struct client_resource *resource;
        struct request *r;

        if (release_client_resource(client, request->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct request, resource);
        if (request->length < r->length)
                r->length = request->length;
        if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
                return -EFAULT;

        fw_send_response(client->device->card, r->request, request->rcode);
        kfree(r);

        return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
        struct fw_cdev_initiate_bus_reset *request = buffer;
        int short_reset;

        short_reset = (request->type == FW_CDEV_SHORT_RESET);

        return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
        struct fw_descriptor d;
        struct client_resource resource;
        u32 data[0];
};

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor *descriptor =
                container_of(resource, struct descriptor, resource);

        fw_core_remove_descriptor(&descriptor->d);
        kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_add_descriptor *request = buffer;
        struct descriptor *descriptor;
        int ret;

        if (request->length > 256)
                return -EINVAL;

        descriptor =
                kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
        if (descriptor == NULL)
                return -ENOMEM;

        if (copy_from_user(descriptor->data,
                           u64_to_uptr(request->data), request->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        descriptor->d.length    = request->length;
        descriptor->d.immediate = request->immediate;
        descriptor->d.key       = request->key;
        descriptor->d.data      = descriptor->data;

        ret = fw_core_add_descriptor(&descriptor->d);
        if (ret < 0)
                goto failed;

        descriptor->resource.release = release_descriptor;
        ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&descriptor->d);
                goto failed;
        }
        request->handle = descriptor->resource.handle;

        return 0;
 failed:
        kfree(descriptor);

        return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
        struct fw_cdev_remove_descriptor *request = buffer;

        return release_client_resource(client, request->handle,
                                       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt *irq;

        irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
        if (irq == NULL)
                return;

        irq->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
        irq->interrupt.closure       = client->iso_closure;
        irq->interrupt.cycle         = cycle;
        irq->interrupt.header_length = header_length;
        memcpy(irq->interrupt.header, header, header_length);
        queue_event(client, &irq->event, &irq->interrupt,
                    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
        struct fw_cdev_create_iso_context *request = buffer;
        struct fw_iso_context *context;

        /* We only support one context at this time. */
        if (client->iso_context != NULL)
                return -EBUSY;

        if (request->channel > 63)
                return -EINVAL;

        switch (request->type) {
        case FW_ISO_CONTEXT_RECEIVE:
                if (request->header_size < 4 || (request->header_size & 3))
                        return -EINVAL;
                break;

        case FW_ISO_CONTEXT_TRANSMIT:
                if (request->speed > SCODE_3200)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card,
                                        request->type,
                                        request->channel,
                                        request->speed,
                                        request->header_size,
                                        iso_callback, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        client->iso_closure = request->closure;
        client->iso_context = context;

        /* We only support one context at this time. */
        request->handle = 0;

        return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)   ((v) & 0xffff)
#define GET_INTERRUPT(v)        (((v) >> 16) & 0x01)
#define GET_SKIP(v)             (((v) >> 17) & 0x01)
#define GET_TAG(v)              (((v) >> 18) & 0x03)
#define GET_SY(v)               (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)    (((v) >> 24) & 0xff)
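
/*
 * The control word packs six fields into 32 bits:
 *
 *     31        24 23    20 19  18 17  16 15              0
 *     +-----------+--------+------+---+---+----------------+
 *     | header_len|   sy   | tag  |skp|irq| payload_length |
 *     +-----------+--------+------+---+---+----------------+
 *
 * Userspace builds it with the mirror-image FW_CDEV_ISO_* macros from
 * <linux/firewire-cdev.h>, e.g. (sketch):
 *
 *     p->control = FW_CDEV_ISO_PAYLOAD_LENGTH(len)
 *                | FW_CDEV_ISO_HEADER_LENGTH(hlen)
 *                | FW_CDEV_ISO_INTERRUPT;
 */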
static int ioctl_queue_iso(struct client *client, void *buffer)
{
        struct fw_cdev_queue_iso *request = buffer;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, header_length;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || request->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request->data pointer is ignored.
         */
        payload = (unsigned long)request->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (request->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

        if (!access_ok(VERIFY_READ, p, request->size))
                return -EFAULT;

        end = (void __user *)p + request->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
                        header_length = u.packet.header_length;
                } else {
                        /*
                         * We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size.
                         */
                        if (ctx->header_size == 0) {
                                if (u.packet.header_length > 0)
                                        return -EINVAL;
                        } else if (u.packet.header_length %
                                   ctx->header_size != 0) {
                                return -EINVAL;
                        }
                        header_length = 0;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user(u.packet.header, p->header,
                                     header_length))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        request->size   -= uptr_to_u64(p) - request->packets;
        request->packets = uptr_to_u64(p);
        request->data    = client->vm_start + payload;

        return count;
}
static int ioctl_start_iso(struct client *client, void *buffer)
{
        struct fw_cdev_start_iso *request = buffer;

        if (client->iso_context == NULL || request->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
                if (request->tags == 0 || request->tags > 15)
                        return -EINVAL;

                if (request->sync > 15)
                        return -EINVAL;
        }

        return fw_iso_context_start(client->iso_context, request->cycle,
                                    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
        struct fw_cdev_stop_iso *request = buffer;

        if (client->iso_context == NULL || request->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
        struct fw_cdev_get_cycle_timer *request = buffer;
        struct fw_card *card = client->device->card;
        unsigned long long bus_time;
        struct timeval tv;
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);

        bus_time = card->driver->get_bus_time(card);
        do_gettimeofday(&tv);

        local_irq_restore(flags);
        preempt_enable();

        request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
        request->cycle_timer = bus_time & 0xffffffff;

        return 0;
}
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_get_info,
        ioctl_send_request,
        ioctl_allocate,
        ioctl_deallocate,
        ioctl_send_response,
        ioctl_initiate_bus_reset,
        ioctl_add_descriptor,
        ioctl_remove_descriptor,
        ioctl_create_iso_context,
        ioctl_queue_iso,
        ioctl_start_iso,
        ioctl_stop_iso,
        ioctl_get_cycle_timer,
};

static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        char buffer[256];
        int ret;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
                return -EINVAL;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (_IOC_SIZE(cmd) > sizeof(buffer) ||
                    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        return ret;
}
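
/*
 * The handler table above is indexed by _IOC_NR(cmd), so its order must
 * match the command numbers in <linux/firewire-cdev.h>; all commands share
 * the ioctl type '#'.  For instance (sketch of the header's encoding):
 *
 *     #define FW_CDEV_IOC_GET_INFO \
 *             _IOWR('#', 0x00, struct fw_cdev_get_info)
 *     #define FW_CDEV_IOC_SEND_REQUEST \
 *             _IOW('#', 0x01, struct fw_cdev_send_request)
 *
 * _IOC_DIR() and _IOC_SIZE() of that encoding drive the copy_from_user()
 * and copy_to_user() calls in dispatch_ioctl(), so no handler touches
 * userspace memory directly for its argument struct.
 */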
static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        return dispatch_ioctl(client, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        ret = fw_iso_buffer_init(&client->buffer, client->device->card,
                                 page_count, direction);
        if (ret < 0)
                return ret;

        ret = fw_iso_buffer_map(&client->buffer, vma);
        if (ret < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return ret;
}
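
/*
 * Example (userspace sketch): the iso payload buffer is obtained by mapping
 * the device file itself.  The mapping must be MAP_SHARED with a
 * page-aligned size, and mapping it writable selects the device-bound DMA
 * direction, as the VM_WRITE test above shows:
 *
 *     size_t size = 16 * 4096;
 *     void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, 0);
 */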
static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *r = p;
        struct client *client = data;

        r->release(client, r);
        client_put(client);

        return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *e, *next_e;
        unsigned long flags;

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irqsave(&client->lock, flags);
        client->in_shutdown = true;
        spin_unlock_irqrestore(&client->lock, flags);

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(e, next_e, &client->event_list, link)
                kfree(e);

        client_put(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll           = fw_device_op_poll,
        .release        = fw_device_op_release,
        .mmap           = fw_device_op_mmap,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};