/*
 * Internal Thunderbolt Connection Manager. This is a firmware running on
 * the Thunderbolt host controller performing most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_MAX_LINK			4
#define ICM_MAX_DEPTH			6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	int vnd_cap;
	bool safe_mode;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};
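
/*
 * struct icm is the domain private data allocated right after struct tb
 * (see tb_domain_alloc() in icm_probe() and tb_priv()), so the owning
 * domain can be recovered by stepping back over struct tb.
 */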
static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
}
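
/*
 * Each physical port consists of two Thunderbolt links. Given one
 * (1-based) link number this returns the number of the other link on the
 * same physical port (1 <-> 2, 3 <-> 4), or 0 if @link is 0.
 */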
static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}
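
/*
 * An ICM response may span several packets. Copy each packet into its
 * slot in the caller supplied response buffer and report completion once
 * the last packet of the sequence has been seen.
 */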
static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}
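
/*
 * Send a single ICM message and wait for the response. Only one message
 * is in flight at a time (serialized by @request_lock) and a request that
 * times out is retried a few times before -ETIMEDOUT is returned.
 */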
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}
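
/*
 * Falcon Ridge firmware does not provide a direct route query, so the
 * full topology is fetched and walked from the root switch down to
 * @depth to find the route string of the switch behind @link.
 */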
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 10000);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}
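
/*
 * Handle an ICM "device connected" notification. If the switch is
 * already known, only the bookkeeping (route, connection id/key, link
 * and depth) is refreshed; otherwise any stale switch occupying the same
 * position is removed and a new switch is allocated and added under its
 * parent.
 */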
static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	u8 link, depth;
	u64 route;
	int ret;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
		phy_port = phy_port_from_route(route, depth);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our book keeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
			tb_port_at(route, parent_sw)->remote =
				tb_upstream_port(sw);
			sw->config.route_hi = upper_32_bits(route);
			sw->config.route_lo = lower_32_bits(route);
			sw->connection_id = pkg->connection_id;
			sw->connection_key = pkg->connection_key;
			sw->link = link;
			sw->depth = depth;
			sw->is_unplugged = false;
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we found one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
	if (!sw) {
		tb_switch_put(parent_sw);
		return;
	}

	sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
	sw->connection_id = pkg->connection_id;
	sw->connection_key = pkg->connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->authorized = authorized;
	sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
				ICM_FLAGS_SLEVEL_SHIFT;

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	ret = tb_switch_add(sw);
	if (ret) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
		tb_switch_put(sw);
	}

	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}
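
/*
 * Walk up the PCI hierarchy from the NHI to the PCIe upstream port of
 * the host controller. Only the known Alpine Ridge bridges are returned;
 * anything else yields NULL.
 */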
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)
		return true;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 5;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(30);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	switch (n->pkg->code) {
	case ICM_EVENT_DEVICE_CONNECTED:
		icm->device_connected(tb, n->pkg);
		break;
	case ICM_EVENT_DEVICE_DISCONNECTED:
		icm->device_disconnected(tb, n->pkg);
		break;
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}
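
/*
 * Called by the control channel when an ICM event packet arrives. The
 * packet is duplicated and handling is deferred to the domain workqueue
 * where it runs with the domain lock held.
 */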
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
{
	struct icm_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	unsigned int retries = 10;
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & 0xf;

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	return -ETIMEDOUT;
}
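
/*
 * Poll the PCIe2CIO command register until the firmware clears the START
 * bit or @timeout_msec expires. A set TIMEOUT bit (or an expired wait)
 * is reported as -ETIMEDOUT.
 */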
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)
		return 0;

	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}
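
/*
 * Each physical port is backed by two lane (null) ports. If both report
 * an established link, both links are disabled and, after a short delay,
 * re-enabled, effectively resetting whatever is connected to that port.
 */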
static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}

static int icm_driver_ready(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	ret = icm_firmware_init(tb);
	if (ret)
		return ret;

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
		return 0;
	}

	return __icm_driver_ready(tb, &tb->security_level);
}

static int icm_suspend(struct tb *tb)
{
	int ret;

	ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
	if (ret)
		tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
			ret, __func__);

	return 0;
}

/*
 * Mark all switches (except the root switch) below this one unplugged.
 * ICM firmware will send us an updated list of switches after we have
 * sent it the driver ready command. If a switch is not in that list it
 * will be removed when we perform rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
	unsigned int i;

	if (tb_route(sw))
		sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;

		icm_unplug_children(port->remote->sw);
	}
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
	unsigned int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			icm_free_unplugged_children(port->remote->sw);
		}
	}
}

static void icm_rescan_work(struct work_struct *work)
{
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)
		return;

	icm_unplug_children(tb->root_switch);

	/*
	 * Now all existing children should be resumed, start events
	 * from ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL);

	/*
	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule rescan to clean them up
	 * if any.
	 */
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}

static int icm_start(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	int ret;

	if (icm->safe_mode)
		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
	else
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENODEV;

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;

	ret = tb_switch_add(tb->root_switch);
	if (ret)
		tb_switch_put(tb->root_switch);

	return ret;
}

static void icm_stop(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge and Alpine Ridge */
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.start = icm_start,
	.stop = icm_stop,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
};

struct tb *icm_probe(struct tb_nhi *nhi)
{
	struct icm *icm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(struct icm));
	if (!tb)
		return NULL;

	icm = tb_priv(tb);
	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		tb->cm_ops = &icm_fr_ops;
		break;
	}

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
		tb_domain_put(tb);
		return NULL;
	}

	return tb;
}