  1. /*
  2. * Thunderbolt bus support
  3. *
  4. * Copyright (C) 2017, Intel Corporation
  5. * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/device.h>
  12. #include <linux/idr.h>
  13. #include <linux/module.h>
  14. #include <linux/slab.h>
  15. #include <linux/random.h>
  16. #include <crypto/hash.h>
  17. #include "tb.h"
/* Allocates small unique indexes for domains (used in device names) */
static DEFINE_IDA(tb_domain_ida);

/* Maps enum tb_security_level values to the strings exposed in sysfs */
static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
};
  25. static ssize_t security_show(struct device *dev, struct device_attribute *attr,
  26. char *buf)
  27. {
  28. struct tb *tb = container_of(dev, struct tb, dev);
  29. return sprintf(buf, "%s\n", tb_security_names[tb->security_level]);
  30. }
  31. static DEVICE_ATTR_RO(security);
/* Attributes shown for every domain device in sysfs */
static struct attribute *domain_attrs[] = {
	&dev_attr_security.attr,
	NULL,
};

static struct attribute_group domain_attr_group = {
	.attrs = domain_attrs,
};

/* Default attribute groups assigned to tb->dev in tb_domain_alloc() */
static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

/* The Thunderbolt bus; domains and switches are registered on it */
struct bus_type tb_bus_type = {
	.name = "thunderbolt",
};
/*
 * Device release callback — runs when the last reference to the domain
 * device is dropped. Undoes everything tb_domain_alloc() set up.
 */
static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes map with that the hardware
	 * expects because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	/* Connection manager private data lives right after struct tb */
	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	/* Unique index used in the device and workqueue names below */
	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	/* Ordered workqueue serializes event/hotplug handling per domain */
	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
  107. static void tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
  108. const void *buf, size_t size)
  109. {
  110. struct tb *tb = data;
  111. if (!tb->cm_ops->handle_event) {
  112. tb_warn(tb, "domain does not have event handler\n");
  113. return;
  114. }
  115. tb->cm_ops->handle_event(tb, type, buf, size);
  116. }
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this has been returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	/* Connection manager ops must be filled in before adding */
	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}
/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	/* Make sure any queued event/hotplug work has finished */
	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}
/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	/* Only stop the channel if the connection manager suspended OK */
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}
/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	/* Restart control traffic before asking the CM to resume devices */
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}
  226. int tb_domain_suspend(struct tb *tb)
  227. {
  228. int ret;
  229. mutex_lock(&tb->lock);
  230. if (tb->cm_ops->suspend) {
  231. ret = tb->cm_ops->suspend(tb);
  232. if (ret) {
  233. mutex_unlock(&tb->lock);
  234. return ret;
  235. }
  236. }
  237. mutex_unlock(&tb->lock);
  238. return 0;
  239. }
  240. void tb_domain_complete(struct tb *tb)
  241. {
  242. mutex_lock(&tb->lock);
  243. if (tb->cm_ops->complete)
  244. tb->cm_ops->complete(tb);
  245. mutex_unlock(&tb->lock);
  246. }
  247. /**
  248. * tb_domain_approve_switch() - Approve switch
  249. * @tb: Domain the switch belongs to
  250. * @sw: Switch to approve
  251. *
  252. * This will approve switch by connection manager specific means. In
  253. * case of success the connection manager will create tunnels for all
  254. * supported protocols.
  255. */
  256. int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
  257. {
  258. struct tb_switch *parent_sw;
  259. if (!tb->cm_ops->approve_switch)
  260. return -EPERM;
  261. /* The parent switch must be authorized before this one */
  262. parent_sw = tb_to_switch(sw->dev.parent);
  263. if (!parent_sw || !parent_sw->authorized)
  264. return -EINVAL;
  265. return tb->cm_ops->approve_switch(tb, sw);
  266. }
  267. /**
  268. * tb_domain_approve_switch_key() - Approve switch and add key
  269. * @tb: Domain the switch belongs to
  270. * @sw: Switch to approve
  271. *
  272. * For switches that support secure connect, this function first adds
  273. * key to the switch NVM using connection manager specific means. If
  274. * adding the key is successful, the switch is approved and connected.
  275. *
  276. * Return: %0 on success and negative errno in case of failure.
  277. */
  278. int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
  279. {
  280. struct tb_switch *parent_sw;
  281. int ret;
  282. if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
  283. return -EPERM;
  284. /* The parent switch must be authorized before this one */
  285. parent_sw = tb_to_switch(sw->dev.parent);
  286. if (!parent_sw || !parent_sw->authorized)
  287. return -EINVAL;
  288. ret = tb->cm_ops->add_switch_key(tb, sw);
  289. if (ret)
  290. return ret;
  291. return tb->cm_ops->approve_switch(tb, sw);
  292. }
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	/* Send a fresh random challenge; the switch HMACs it with its key */
	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	/* Compute the expected HMAC-SHA256 locally from the stored key */
	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	/* Descriptor size depends on the tfm, so allocate it dynamically */
	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* challenge and hmac have the same size (TB_SWITCH_KEY_SIZE) */
	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
  356. /**
  357. * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
  358. * @tb: Domain whose PCIe paths to disconnect
  359. *
  360. * This needs to be called in preparation for NVM upgrade of the host
  361. * controller. Makes sure all PCIe paths are disconnected.
  362. *
  363. * Return %0 on success and negative errno in case of error.
  364. */
  365. int tb_domain_disconnect_pcie_paths(struct tb *tb)
  366. {
  367. if (!tb->cm_ops->disconnect_pcie_paths)
  368. return -EPERM;
  369. return tb->cm_ops->disconnect_pcie_paths(tb);
  370. }
/* Register the Thunderbolt bus; called once at module init */
int tb_domain_init(void)
{
	return bus_register(&tb_bus_type);
}

/* Tear down everything tb_domain_init() and friends set up */
void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_switch_exit();
}