// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 */
struct tb_ctl {
        struct tb_nhi *nhi;
        struct tb_ring *tx;
        struct tb_ring *rx;

        struct dma_pool *frame_pool;
        struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
        struct mutex request_queue_lock;
        struct list_head request_queue;
        bool running;

        event_cb callback;
        void *callback_data;
};

#define tb_ctl_WARN(ctl, format, arg...) \
        dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
        dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
        dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
        dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
        dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
        struct tb_cfg_request *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        kref_init(&req->kref);

        return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_get(&req->kref);
        mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
        struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

        kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_put(&req->kref, tb_cfg_request_destroy);
        mutex_unlock(&tb_cfg_request_lock);
}
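
/*
 * Example (illustrative sketch, not part of the driver): the intended
 * refcount lifecycle of a request. prepare_request() is a hypothetical
 * placeholder for whatever fills in the request fields.
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();	// refcount 1
 *	if (!req)
 *		return -ENOMEM;
 *	prepare_request(req);		// hypothetical setup helper
 *	tb_cfg_request_get(req);	// extra reference, e.g. for a worker
 *	...
 *	tb_cfg_request_put(req);	// drop the extra reference
 *	tb_cfg_request_put(req);	// drop the last reference, frees req
 */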

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
                                  struct tb_cfg_request *req)
{
        WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
        WARN_ON(req->ctl);

        mutex_lock(&ctl->request_queue_lock);
        if (!ctl->running) {
                mutex_unlock(&ctl->request_queue_lock);
                return -ENOTCONN;
        }
        req->ctl = ctl;
        list_add_tail(&req->list, &ctl->request_queue);
        set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        mutex_unlock(&ctl->request_queue_lock);
        return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
        struct tb_ctl *ctl = req->ctl;

        mutex_lock(&ctl->request_queue_lock);
        list_del(&req->list);
        clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                wake_up(&tb_cfg_request_cancel_queue);
        mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
        return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
        struct tb_cfg_request *req;
        bool found = false;

        mutex_lock(&pkg->ctl->request_queue_lock);
        list_for_each_entry(req, &pkg->ctl->request_queue, list) {
                tb_cfg_request_get(req);
                if (req->match(req, pkg)) {
                        found = true;
                        break;
                }
                tb_cfg_request_put(req);
        }
        mutex_unlock(&pkg->ctl->request_queue_lock);

        return found ? req : NULL;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
                        enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;

        /* check frame, TODO: frame flags */
        if (WARN(len != pkg->frame.size,
                        "wrong framesize (expected %#x, got %#x)\n",
                        len, pkg->frame.size))
                return -EIO;
        if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
                        type, pkg->frame.eof))
                return -EIO;
        if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
                        pkg->frame.sof))
                return -EIO;

        /* check header */
        if (WARN(header->unknown != 1 << 9,
                        "header->unknown is %#x\n", header->unknown))
                return -EIO;
        if (WARN(route != tb_cfg_get_route(header),
                        "wrong route (expected %llx, got %llx)",
                        route, tb_cfg_get_route(header)))
                return -EIO;
        return 0;
}

static int check_config_address(struct tb_cfg_address addr,
                                enum tb_cfg_space space, u32 offset,
                                u32 length)
{
        if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
                return -EIO;
        if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
                        space, addr.space))
                return -EIO;
        if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
                        offset, addr.offset))
                return -EIO;
        if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
                        length, addr.length))
                return -EIO;
        /*
         * We cannot check addr->port as it is set to the upstream port of the
         * sender.
         */
        return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
        struct cfg_error_pkg *pkg = response->buffer;
        struct tb_cfg_result res = { 0 };

        res.response_route = tb_cfg_get_route(&pkg->header);
        res.response_port = 0;
        res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
                               tb_cfg_get_route(&pkg->header));
        if (res.err)
                return res;

        WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
        WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
        WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);

        res.err = 1;
        res.tb_error = pkg->error;
        res.response_port = pkg->port;
        return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
                                         enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;
        struct tb_cfg_result res = { 0 };

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return decode_error(pkg);

        res.response_port = 0; /* will be updated later for cfg_read/write */
        res.response_route = tb_cfg_get_route(header);
        res.err = check_header(pkg, len, type, route);
        return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
                               const struct tb_cfg_result *res)
{
        WARN_ON(res->err != 1);
        switch (res->tb_error) {
        case TB_CFG_ERROR_PORT_NOT_CONNECTED:
                /* Port is not connected. This can happen during surprise
                 * removal. Do not warn. */
                return;
        case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
                /*
                 * Invalid cfg_space/offset/length combination in
                 * cfg_read/cfg_write.
                 */
                tb_ctl_WARN(ctl,
                        "CFG_ERROR(%llx:%x): Invalid config space or offset\n",
                        res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_NO_SUCH_PORT:
                /*
                 * - The route contains a non-existent port.
                 * - The route contains a non-PHY port (e.g. PCIe).
                 * - The port in cfg_read/cfg_write does not exist.
                 */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
                        res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_LOOP:
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
                        res->response_route, res->response_port);
                return;
        default:
                /* 5,6,7,9 and 11 are also valid error codes */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
                        res->response_route, res->response_port);
                return;
        }
}
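
/*
 * Descriptive note (editorial, hedged): as used by this driver, the 4-byte
 * checksum carried at the end of every control frame is a CRC32C
 * (Castagnoli) of the big-endian payload: seeded with ~0, bit-inverted at
 * the end and stored in big-endian byte order.
 */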
static __be32 tb_crc(const void *data, size_t len)
{
        return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
        if (pkg) {
                dma_pool_free(pkg->ctl->frame_pool,
                              pkg->buffer, pkg->frame.buffer_phy);
                kfree(pkg);
        }
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
        struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

        if (!pkg)
                return NULL;
        pkg->ctl = ctl;
        pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
                                     &pkg->frame.buffer_phy);
        if (!pkg->buffer) {
                kfree(pkg);
                return NULL;
        }
        return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

        tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
                     enum tb_cfg_pkg_type type)
{
        int res;
        struct ctl_pkg *pkg;

        if (len % 4 != 0) { /* required for le->be conversion */
                tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
                return -EINVAL;
        }
        if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
                tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
                            len, TB_FRAME_SIZE - 4);
                return -EINVAL;
        }
        pkg = tb_ctl_pkg_alloc(ctl);
        if (!pkg)
                return -ENOMEM;
        pkg->frame.callback = tb_ctl_tx_callback;
        pkg->frame.size = len + 4;
        pkg->frame.sof = type;
        pkg->frame.eof = type;
        cpu_to_be32_array(pkg->buffer, data, len / 4);
        *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

        res = tb_ring_tx(ctl->tx, &pkg->frame);
        if (res) /* ring is stopped */
                tb_ctl_pkg_free(pkg);
        return res;
}
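
/*
 * Descriptive note on the frame built by tb_ctl_tx() above: the caller's
 * little-endian dwords are byte-swapped into the DMA buffer and a 4-byte
 * CRC32C of that big-endian payload is appended, so frame.size is always
 * len + 4.
 *
 *	+------------------------------+--------+
 *	| payload (len bytes, __be32)  | CRC32C |
 *	+------------------------------+--------+
 */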

/**
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
                                struct ctl_pkg *pkg, size_t size)
{
        return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
        tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
                                                * We ignore failures during stop.
                                                * All rx packets are referenced
                                                * from ctl->rx_packets, so we do
                                                * not lose them.
                                                */
}

static bool tb_async_error(const struct ctl_pkg *pkg)
{
        const struct cfg_error_pkg *error = pkg->buffer;

        if (pkg->frame.eof != TB_CFG_PKG_ERROR)
                return false;

        switch (error->error) {
        case TB_CFG_ERROR_LINK_ERROR:
        case TB_CFG_ERROR_HEC_ERROR_DETECTED:
        case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
                return true;

        default:
                return false;
        }
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
        struct tb_cfg_request *req;
        __be32 crc32;

        if (canceled)
                return; /*
                         * ring is stopped, packet is referenced from
                         * ctl->rx_packets.
                         */

        if (frame->size < 4 || frame->size % 4 != 0) {
                tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
                           frame->size);
                goto rx;
        }

        frame->size -= 4; /* remove checksum */
        crc32 = tb_crc(pkg->buffer, frame->size);
        be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

        switch (frame->eof) {
        case TB_CFG_PKG_READ:
        case TB_CFG_PKG_WRITE:
        case TB_CFG_PKG_ERROR:
        case TB_CFG_PKG_OVERRIDE:
        case TB_CFG_PKG_RESET:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                if (tb_async_error(pkg)) {
                        tb_ctl_handle_event(pkg->ctl, frame->eof,
                                            pkg, frame->size);
                        goto rx;
                }
                break;

        case TB_CFG_PKG_EVENT:
        case TB_CFG_PKG_XDOMAIN_RESP:
        case TB_CFG_PKG_XDOMAIN_REQ:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                /* Fall through */
        case TB_CFG_PKG_ICM_EVENT:
                if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
                        goto rx;
                break;

        default:
                break;
        }

        /*
         * The received packet will be processed only if there is an
         * active request and the packet is what is expected. This
         * prevents packets such as replies coming after timeout has
         * triggered from messing with the active requests.
         */
        req = tb_cfg_request_find(pkg->ctl, pkg);
        if (req) {
                if (req->copy(req, pkg))
                        schedule_work(&req->work);
                tb_cfg_request_put(req);
        }

rx:
        tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
        struct tb_cfg_request *req = container_of(work, typeof(*req), work);

        if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                req->callback(req->callback_data);

        tb_cfg_request_dequeue(req);
        tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
                   void (*callback)(void *), void *callback_data)
{
        int ret;

        req->flags = 0;
        req->callback = callback;
        req->callback_data = callback_data;
        INIT_WORK(&req->work, tb_cfg_request_work);
        INIT_LIST_HEAD(&req->list);

        tb_cfg_request_get(req);
        ret = tb_cfg_request_enqueue(ctl, req);
        if (ret)
                goto err_put;

        ret = tb_ctl_tx(ctl, req->request, req->request_size,
                        req->request_type);
        if (ret)
                goto err_dequeue;

        if (!req->response)
                schedule_work(&req->work);

        return 0;

err_dequeue:
        tb_cfg_request_dequeue(req);
err_put:
        tb_cfg_request_put(req);

        return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
        set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
        schedule_work(&req->work);
        wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
        req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
        complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
                                         struct tb_cfg_request *req,
                                         int timeout_msec)
{
        unsigned long timeout = msecs_to_jiffies(timeout_msec);
        struct tb_cfg_result res = { 0 };
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
        if (ret) {
                res.err = ret;
                return res;
        }

        if (!wait_for_completion_timeout(&done, timeout))
                tb_cfg_request_cancel(req, -ETIMEDOUT);

        flush_work(&req->work);

        return req->result;
}
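
/*
 * Example (illustrative sketch, not part of the driver): how a caller puts
 * the request machinery above together. tb_cfg_reset() and
 * tb_cfg_read_raw() below follow exactly this pattern; the packet types
 * used here are only an example.
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;		// packet to send
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_RESET;
 *	req->response = &reply;			// buffer for the reply
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_RESET;
 *	res = tb_cfg_request_sync(ctl, req, timeout_msec);
 *	tb_cfg_request_put(req);
 */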

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
        int i;
        struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

        if (!ctl)
                return NULL;
        ctl->nhi = nhi;
        ctl->callback = cb;
        ctl->callback_data = cb_data;

        mutex_init(&ctl->request_queue_lock);
        INIT_LIST_HEAD(&ctl->request_queue);
        ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
                                          TB_FRAME_SIZE, 4, 0);
        if (!ctl->frame_pool)
                goto err;

        ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
        if (!ctl->tx)
                goto err;

        ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
                                   0xffff, NULL, NULL);
        if (!ctl->rx)
                goto err;

        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
                ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
                if (!ctl->rx_packets[i])
                        goto err;
                ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
        }

        tb_ctl_dbg(ctl, "control channel created\n");
        return ctl;
err:
        tb_ctl_free(ctl);
        return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
        int i;

        if (!ctl)
                return;

        if (ctl->rx)
                tb_ring_free(ctl->rx);
        if (ctl->tx)
                tb_ring_free(ctl->tx);

        /* free RX packets */
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_pkg_free(ctl->rx_packets[i]);

        dma_pool_destroy(ctl->frame_pool);
        kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
        int i;

        tb_ctl_dbg(ctl, "control channel starting...\n");
        tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
        tb_ring_start(ctl->rx);
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_rx_submit(ctl->rx_packets[i]);

        ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
        mutex_lock(&ctl->request_queue_lock);
        ctl->running = false;
        mutex_unlock(&ctl->request_queue_lock);

        tb_ring_stop(ctl->rx);
        tb_ring_stop(ctl->tx);

        if (!list_empty(&ctl->request_queue))
                tb_ctl_WARN(ctl, "dangling request in request_queue\n");
        INIT_LIST_HEAD(&ctl->request_queue);
        tb_ctl_dbg(ctl, "control channel stopped\n");
}
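
/*
 * Example (illustrative sketch, not part of the driver): the expected
 * lifecycle of a control channel per the comments above. my_event_cb is a
 * hypothetical event callback supplied by the connection manager.
 *
 *	struct tb_ctl *ctl = tb_ctl_alloc(nhi, my_event_cb, tb);
 *	if (!ctl)
 *		return -ENOMEM;
 *	tb_ctl_start(ctl);
 *	...			// issue tb_cfg_read()/tb_cfg_write() etc.
 *	tb_ctl_stop(ctl);	// never from within my_event_cb
 *	tb_ctl_free(ctl);	// only after tb_ctl_stop()
 */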

/* public interface, commands */

/**
 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
                 enum tb_cfg_error error)
{
        struct cfg_error_pkg pkg = {
                .header = tb_cfg_make_header(route),
                .port = port,
                .error = error,
        };

        tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
        return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
                         const struct ctl_pkg *pkg)
{
        u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return true;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (route != tb_cfg_get_route(req->request))
                return false;
        if (pkg->frame.size != req->response_size)
                return false;

        if (pkg->frame.eof == TB_CFG_PKG_READ ||
            pkg->frame.eof == TB_CFG_PKG_WRITE) {
                const struct cfg_read_pkg *req_hdr = req->request;
                const struct cfg_read_pkg *res_hdr = pkg->buffer;

                if (req_hdr->addr.seq != res_hdr->addr.seq)
                        return false;
        }

        return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        struct tb_cfg_result res;

        /* Now make sure it is in expected format */
        res = parse_header(pkg, req->response_size, req->response_type,
                           tb_cfg_get_route(req->request));
        if (!res.err)
                memcpy(req->response, pkg->buffer, req->response_size);

        req->result = res;

        /* Always complete when first response is received */
        return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
                                  int timeout_msec)
{
        struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
        struct tb_cfg_result res = { 0 };
        struct tb_cfg_header reply;
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req) {
                res.err = -ENOMEM;
                return res;
        }

        req->match = tb_cfg_match;
        req->copy = tb_cfg_copy;
        req->request = &request;
        req->request_size = sizeof(request);
        req->request_type = TB_CFG_PKG_RESET;
        req->response = &reply;
        req->response_size = sizeof(reply);
        req->response_type = TB_CFG_PKG_RESET;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_read_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_write_pkg reply;
        int retries = 0;

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = sizeof(request);
                req->request_type = TB_CFG_PKG_READ;
                req->response = &reply;
                req->response_size = 12 + 4 * length;
                req->response_type = TB_CFG_PKG_READ;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        if (!res.err)
                memcpy(buffer, &reply.data, 4 * length);
        return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_write_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_read_pkg reply;
        int retries = 0;

        memcpy(&request.data, buffer, length * 4);

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = 12 + 4 * length;
                req->request_type = TB_CFG_PKG_WRITE;
                req->response = &reply;
                req->response_size = sizeof(reply);
                req->response_type = TB_CFG_PKG_WRITE;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        return res;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
                enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
                        space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                tb_cfg_print_error(ctl, &res);
                return -EIO;

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
                            space, offset);
                break;

        default:
                WARN(1, "tb_cfg_read: %d\n", res.err);
                break;
        }
        return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
                 enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
                        space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                tb_cfg_print_error(ctl, &res);
                return -EIO;

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
                            space, offset);
                break;

        default:
                WARN(1, "tb_cfg_write: %d\n", res.err);
                break;
        }
        return res.err;
}
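
/*
 * Example (illustrative sketch, not part of the driver): reading a single
 * dword from a switch config space with the wrapper above; tb_cfg_write()
 * takes the same arguments with a const source buffer. The route and
 * offset here are made-up values for illustration only.
 *
 *	u32 val;
 *	int ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1);
 *	if (ret)
 *		return ret;	// 1 is mapped to -EIO, so ret is 0 or negative
 */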

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
        u32 dummy;
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
                                                   TB_CFG_SWITCH, 0, 1,
                                                   TB_CFG_DEFAULT_TIMEOUT);

        if (res.err == 1)
                return -EIO;

        if (res.err)
                return res.err;

        return res.response_port;
}