dma_port.c

/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP 0x3e

#define MAIL_DATA 1
#define MAIL_DATA_DWORDS 16

#define MAIL_IN 17
#define MAIL_IN_CMD_SHIFT 28
#define MAIL_IN_CMD_MASK GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE 0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
#define MAIL_IN_CMD_FLASH_READ 0x2
#define MAIL_IN_CMD_POWER_CYCLE 0x4
#define MAIL_IN_DWORDS_SHIFT 24
#define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT 2
#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
#define MAIL_IN_CSS BIT(1)
#define MAIL_IN_OP_REQUEST BIT(0)

#define MAIL_OUT 18
#define MAIL_OUT_STATUS_RESPONSE BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT 4
#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED 0
#define MAIL_OUT_STATUS_ERR_AUTH 1
#define MAIL_OUT_STATUS_ERR_ACCESS 2

#define DMA_PORT_TIMEOUT 5000 /* ms */
#define DMA_PORT_RETRIES 3

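/*
 * A sketch of how the MAIL_IN request dword is laid out, as implied by
 * the shift/mask definitions above (an editor's illustration, not taken
 * from a datasheet):
 *
 *   31     28 27     24 23                       2    1    0
 * +---------+---------+---------------------------+-----+----+
 * |   CMD   | DWORDS  |    ADDRESS (in dwords)    | CSS | OP |
 * +---------+---------+---------------------------+-----+----+
 */
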
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
        struct tb_switch *sw;
        u8 port;
        u32 base;
        u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
                           const struct ctl_pkg *pkg)
{
        /* Ignore the topmost bit of the route string when comparing */
        u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return true;
        if (pkg->frame.eof != req->response_type)
                return false;
        if (route != tb_cfg_get_route(req->request))
                return false;
        if (pkg->frame.size != req->response_size)
                return false;

        return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        memcpy(req->response, pkg->buffer, req->response_size);
        return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
                         u32 port, u32 offset, u32 length, int timeout_msec)
{
        struct cfg_read_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .seq = 1,
                        .port = port,
                        .space = TB_CFG_PORT,
                        .offset = offset,
                        .length = length,
                },
        };
        struct tb_cfg_request *req;
        struct cfg_write_pkg reply;
        struct tb_cfg_result res;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = dma_port_match;
        req->copy = dma_port_copy;
        req->request = &request;
        req->request_size = sizeof(request);
        req->request_type = TB_CFG_PKG_READ;
        req->response = &reply;
        /* Reply is a 12-byte header (route + addr) plus @length dwords */
        req->response_size = 12 + 4 * length;
        req->response_type = TB_CFG_PKG_READ;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        if (res.err)
                return res.err;

        memcpy(buffer, &reply.data, 4 * length);
        return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
                          u32 port, u32 offset, u32 length, int timeout_msec)
{
        struct cfg_write_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .seq = 1,
                        .port = port,
                        .space = TB_CFG_PORT,
                        .offset = offset,
                        .length = length,
                },
        };
        struct tb_cfg_request *req;
        struct cfg_read_pkg reply;
        struct tb_cfg_result res;

        memcpy(&request.data, buffer, length * 4);

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = dma_port_match;
        req->copy = dma_port_copy;
        req->request = &request;
        req->request_size = 12 + 4 * length;
        req->request_type = TB_CFG_PKG_WRITE;
        req->response = &reply;
        req->response_size = sizeof(reply);
        req->response_type = TB_CFG_PKG_WRITE;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
        int port, ret;
        u32 type;

        /*
         * The DMA (NHI) port is either 3 or 5 depending on the
         * controller. Try both starting from 5 which is more common.
         * The port type is in the low 24 bits of dword 2 of the port
         * config space.
         */
        port = 5;
        ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
                            DMA_PORT_TIMEOUT);
        if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
                return port;

        port = 3;
        ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
                            DMA_PORT_TIMEOUT);
        if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
                return port;

        return -ENODEV;
}

/**
 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
 * @sw: Switch from which to find the DMA port
 *
 * Function checks if the switch NHI port supports DMA configuration
 * based mailbox capability and if it does, allocates and initializes
 * DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional even when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
        struct tb_dma_port *dma;
        int port;

        port = dma_find_port(sw);
        if (port < 0)
                return NULL;

        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return NULL;

        dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
        if (!dma->buf) {
                kfree(dma);
                return NULL;
        }

        dma->sw = sw;
        dma->port = port;
        dma->base = DMA_PORT_CAP;
        return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
        if (dma) {
                kfree(dma->buf);
                kfree(dma);
        }
}

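/*
 * Example of pairing the two calls above (an editor's sketch; the
 * surrounding caller is hypothetical, not part of this driver):
 *
 *      struct tb_dma_port *dma = dma_port_alloc(sw);
 *      if (!dma)
 *              return -ENODEV;
 *      ... use the mailbox helpers below ...
 *      dma_port_free(dma);
 */
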
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
                                        unsigned int timeout)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout);
        struct tb_switch *sw = dma->sw;

        do {
                int ret;
                u32 in;

                ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
                                    dma->base + MAIL_IN, 1, 50);
                if (ret) {
                        if (ret != -ETIMEDOUT)
                                return ret;
                } else if (!(in & MAIL_IN_OP_REQUEST)) {
                        /* The switch cleared the request bit: command done */
                        return 0;
                }

                usleep_range(50, 100);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
        switch (status & MAIL_OUT_STATUS_MASK) {
        case MAIL_OUT_STATUS_COMPLETED:
                return 0;
        case MAIL_OUT_STATUS_ERR_AUTH:
                return -EINVAL;
        case MAIL_OUT_STATUS_ERR_ACCESS:
                return -EACCES;
        }

        return -EIO;
}

static int dma_port_request(struct tb_dma_port *dma, u32 in,
                            unsigned int timeout)
{
        struct tb_switch *sw = dma->sw;
        u32 out;
        int ret;

        ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
                             dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        ret = dma_port_wait_for_completion(dma, timeout);
        if (ret)
                return ret;

        ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
                            dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        return status_to_errno(out);
}

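/*
 * Worked example of the handshake above (editor's note): the power
 * cycle request in dma_port_power_cycle() composes
 * in = (MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT) | MAIL_IN_OP_REQUEST,
 * i.e. 0x40000001, writes it to MAIL_IN, polls until the switch clears
 * MAIL_IN_OP_REQUEST, and then converts the MAIL_OUT status field to an
 * errno with status_to_errno().
 */
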
static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
                                     void *buf, u32 size)
{
        struct tb_switch *sw = dma->sw;
        u32 in, dwaddress, dwords;
        int ret;

        dwaddress = address / 4;
        dwords = size / 4;

        in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
        /* A full block of MAIL_DATA_DWORDS does not fit the 4-bit field */
        if (dwords < MAIL_DATA_DWORDS)
                in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
        in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
        in |= MAIL_IN_OP_REQUEST;

        ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
                             dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
                                      const void *buf, u32 size)
{
        struct tb_switch *sw = dma->sw;
        u32 in, dwaddress, dwords;
        int ret;

        dwords = size / 4;

        /* Write the block to MAIL_DATA registers */
        ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
                             dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

        /* CSS header write is always done to the same magic address */
        if (address >= DMA_PORT_CSS_ADDRESS) {
                dwaddress = DMA_PORT_CSS_ADDRESS;
                in |= MAIL_IN_CSS;
        } else {
                dwaddress = address / 4;
        }

        in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
        in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
        in |= MAIL_IN_OP_REQUEST;

        return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
                        void *buf, size_t size)
{
        unsigned int retries = DMA_PORT_RETRIES;
        unsigned int offset;

        offset = address & 3;
        address = address & ~3;

        do {
                u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
                int ret;

                ret = dma_port_flash_read_block(dma, address, dma->buf,
                                                ALIGN(nbytes, 4));
                if (ret) {
                        if (ret == -ETIMEDOUT) {
                                if (retries--)
                                        continue;
                                ret = -EIO;
                        }
                        return ret;
                }

                memcpy(buf, dma->buf + offset, nbytes);

                size -= nbytes;
                address += nbytes;
                buf += nbytes;
        } while (size > 0);

        return 0;
}

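/*
 * Example read of the first 64 bytes of the active region (an editor's
 * sketch; the caller and buffer are hypothetical):
 *
 *      u8 data[64];
 *      int ret = dma_port_flash_read(dma, 0, data, sizeof(data));
 *      if (ret)
 *              return ret;
 */
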
/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes block of data to the non-active flash region of the switch. If
 * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
                         const void *buf, size_t size)
{
        unsigned int retries = DMA_PORT_RETRIES;
        unsigned int offset;

        if (address >= DMA_PORT_CSS_ADDRESS) {
                offset = 0;
                if (size > DMA_PORT_CSS_MAX_SIZE)
                        return -E2BIG;
        } else {
                offset = address & 3;
                address = address & ~3;
        }

        do {
                u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
                int ret;

                /* Stage the data in the bounce buffer and write from there */
                memcpy(dma->buf + offset, buf, nbytes);

                ret = dma_port_flash_write_block(dma, address, dma->buf,
                                                 nbytes);
                if (ret) {
                        if (ret == -ETIMEDOUT) {
                                if (retries--)
                                        continue;
                                ret = -EIO;
                        }
                        return ret;
                }

                size -= nbytes;
                address += nbytes;
                buf += nbytes;
        } while (size > 0);

        return 0;
}

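/*
 * Example CSS header write (an editor's sketch; css_hdr and hdr_len are
 * hypothetical):
 *
 *      ret = dma_port_flash_write(dma, DMA_PORT_CSS_ADDRESS, css_hdr,
 *                                 hdr_len);
 */
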
/**
 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area is valid, the switch starts the upgrade process in
 * which the active and non-active areas get swapped at the end. The
 * caller should call dma_port_flash_update_auth_status() to get the
 * status of this command, because if the switch in question is the root
 * switch the Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
        u32 in;

        in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
        in |= MAIL_IN_OP_REQUEST;

        return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
        struct tb_switch *sw = dma->sw;
        u32 out, cmd;
        int ret;

        ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
                            dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        /* Check if the status relates to flash update auth */
        cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
        if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
                if (status)
                        *status = out & MAIL_OUT_STATUS_MASK;

                /* Reset is needed in any case */
                return 1;
        }

        return 0;
}

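/*
 * Sketch of a full upgrade flow using the helpers in this file (an
 * editor's illustration; the caller and the error handling are
 * hypothetical):
 *
 *      dma_port_flash_write(dma, 0, image, image_size);
 *      dma_port_flash_update_auth(dma);
 *      ... the switch resets; once it is reachable again ...
 *      ret = dma_port_flash_update_auth_status(dma, &status);
 *      if (ret > 0)
 *              ... @status holds the failure code ...
 */
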
/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers power cycle to the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
        u32 in;

        in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
        in |= MAIL_IN_OP_REQUEST;

        return dma_port_request(dma, in, 150);
}