msi.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463
  1. /*
  2. * linux/kernel/irq/msi.c
  3. *
  4. * Copyright (C) 2014 Intel Corp.
  5. * Author: Jiang Liu <jiang.liu@linux.intel.com>
  6. *
  7. * This file is licensed under GPLv2.
  8. *
  9. * This file contains common code to support Message Signalled Interrupt for
  10. * PCI compatible and non PCI compatible devices.
  11. */
  12. #include <linux/types.h>
  13. #include <linux/device.h>
  14. #include <linux/irq.h>
  15. #include <linux/irqdomain.h>
  16. #include <linux/msi.h>
  17. #include <linux/slab.h>
  18. #include "internals.h"
  19. /**
  20. * alloc_msi_entry - Allocate an initialize msi_entry
  21. * @dev: Pointer to the device for which this is allocated
  22. * @nvec: The number of vectors used in this entry
  23. * @affinity: Optional pointer to an affinity mask array size of @nvec
  24. *
  25. * If @affinity is not NULL then a an affinity array[@nvec] is allocated
  26. * and the affinity masks from @affinity are copied.
  27. */
  28. struct msi_desc *
  29. alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
  30. {
  31. struct msi_desc *desc;
  32. desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  33. if (!desc)
  34. return NULL;
  35. INIT_LIST_HEAD(&desc->list);
  36. desc->dev = dev;
  37. desc->nvec_used = nvec;
  38. if (affinity) {
  39. desc->affinity = kmemdup(affinity,
  40. nvec * sizeof(*desc->affinity), GFP_KERNEL);
  41. if (!desc->affinity) {
  42. kfree(desc);
  43. return NULL;
  44. }
  45. }
  46. return desc;
  47. }
  48. void free_msi_entry(struct msi_desc *entry)
  49. {
  50. kfree(entry->affinity);
  51. kfree(entry);
  52. }
  53. void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
  54. {
  55. *msg = entry->msg;
  56. }
  57. void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
  58. {
  59. struct msi_desc *entry = irq_get_msi_desc(irq);
  60. __get_cached_msi_msg(entry, msg);
  61. }
  62. EXPORT_SYMBOL_GPL(get_cached_msi_msg);
  63. #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
  64. static inline void irq_chip_write_msi_msg(struct irq_data *data,
  65. struct msi_msg *msg)
  66. {
  67. data->chip->irq_write_msi_msg(data, msg);
  68. }
  69. /**
  70. * msi_domain_set_affinity - Generic affinity setter function for MSI domains
  71. * @irq_data: The irq data associated to the interrupt
  72. * @mask: The affinity mask to set
  73. * @force: Flag to enforce setting (disable online checks)
  74. *
  75. * Intended to be used by MSI interrupt controllers which are
  76. * implemented with hierarchical domains.
  77. */
  78. int msi_domain_set_affinity(struct irq_data *irq_data,
  79. const struct cpumask *mask, bool force)
  80. {
  81. struct irq_data *parent = irq_data->parent_data;
  82. struct msi_msg msg;
  83. int ret;
  84. ret = parent->chip->irq_set_affinity(parent, mask, force);
  85. if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
  86. BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
  87. irq_chip_write_msi_msg(irq_data, &msg);
  88. }
  89. return ret;
  90. }
  91. static int msi_domain_activate(struct irq_domain *domain,
  92. struct irq_data *irq_data, bool early)
  93. {
  94. struct msi_msg msg;
  95. BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
  96. irq_chip_write_msi_msg(irq_data, &msg);
  97. return 0;
  98. }
  99. static void msi_domain_deactivate(struct irq_domain *domain,
  100. struct irq_data *irq_data)
  101. {
  102. struct msi_msg msg;
  103. memset(&msg, 0, sizeof(msg));
  104. irq_chip_write_msi_msg(irq_data, &msg);
  105. }
  106. static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
  107. unsigned int nr_irqs, void *arg)
  108. {
  109. struct msi_domain_info *info = domain->host_data;
  110. struct msi_domain_ops *ops = info->ops;
  111. irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
  112. int i, ret;
  113. if (irq_find_mapping(domain, hwirq) > 0)
  114. return -EEXIST;
  115. if (domain->parent) {
  116. ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
  117. if (ret < 0)
  118. return ret;
  119. }
  120. for (i = 0; i < nr_irqs; i++) {
  121. ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
  122. if (ret < 0) {
  123. if (ops->msi_free) {
  124. for (i--; i > 0; i--)
  125. ops->msi_free(domain, info, virq + i);
  126. }
  127. irq_domain_free_irqs_top(domain, virq, nr_irqs);
  128. return ret;
  129. }
  130. }
  131. return 0;
  132. }
  133. static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
  134. unsigned int nr_irqs)
  135. {
  136. struct msi_domain_info *info = domain->host_data;
  137. int i;
  138. if (info->ops->msi_free) {
  139. for (i = 0; i < nr_irqs; i++)
  140. info->ops->msi_free(domain, info, virq + i);
  141. }
  142. irq_domain_free_irqs_top(domain, virq, nr_irqs);
  143. }
  144. static const struct irq_domain_ops msi_domain_ops = {
  145. .alloc = msi_domain_alloc,
  146. .free = msi_domain_free,
  147. .activate = msi_domain_activate,
  148. .deactivate = msi_domain_deactivate,
  149. };
  150. #ifdef GENERIC_MSI_DOMAIN_OPS
  151. static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
  152. msi_alloc_info_t *arg)
  153. {
  154. return arg->hwirq;
  155. }
  156. static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
  157. int nvec, msi_alloc_info_t *arg)
  158. {
  159. memset(arg, 0, sizeof(*arg));
  160. return 0;
  161. }
  162. static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
  163. struct msi_desc *desc)
  164. {
  165. arg->desc = desc;
  166. }
  167. #else
  168. #define msi_domain_ops_get_hwirq NULL
  169. #define msi_domain_ops_prepare NULL
  170. #define msi_domain_ops_set_desc NULL
  171. #endif /* !GENERIC_MSI_DOMAIN_OPS */
  172. static int msi_domain_ops_init(struct irq_domain *domain,
  173. struct msi_domain_info *info,
  174. unsigned int virq, irq_hw_number_t hwirq,
  175. msi_alloc_info_t *arg)
  176. {
  177. irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
  178. info->chip_data);
  179. if (info->handler && info->handler_name) {
  180. __irq_set_handler(virq, info->handler, 0, info->handler_name);
  181. if (info->handler_data)
  182. irq_set_handler_data(virq, info->handler_data);
  183. }
  184. return 0;
  185. }
  186. static int msi_domain_ops_check(struct irq_domain *domain,
  187. struct msi_domain_info *info,
  188. struct device *dev)
  189. {
  190. return 0;
  191. }
  192. static struct msi_domain_ops msi_domain_ops_default = {
  193. .get_hwirq = msi_domain_ops_get_hwirq,
  194. .msi_init = msi_domain_ops_init,
  195. .msi_check = msi_domain_ops_check,
  196. .msi_prepare = msi_domain_ops_prepare,
  197. .set_desc = msi_domain_ops_set_desc,
  198. };
  199. static void msi_domain_update_dom_ops(struct msi_domain_info *info)
  200. {
  201. struct msi_domain_ops *ops = info->ops;
  202. if (ops == NULL) {
  203. info->ops = &msi_domain_ops_default;
  204. return;
  205. }
  206. if (ops->get_hwirq == NULL)
  207. ops->get_hwirq = msi_domain_ops_default.get_hwirq;
  208. if (ops->msi_init == NULL)
  209. ops->msi_init = msi_domain_ops_default.msi_init;
  210. if (ops->msi_check == NULL)
  211. ops->msi_check = msi_domain_ops_default.msi_check;
  212. if (ops->msi_prepare == NULL)
  213. ops->msi_prepare = msi_domain_ops_default.msi_prepare;
  214. if (ops->set_desc == NULL)
  215. ops->set_desc = msi_domain_ops_default.set_desc;
  216. }
  217. static void msi_domain_update_chip_ops(struct msi_domain_info *info)
  218. {
  219. struct irq_chip *chip = info->chip;
  220. BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
  221. if (!chip->irq_set_affinity)
  222. chip->irq_set_affinity = msi_domain_set_affinity;
  223. }
  224. /**
  225. * msi_create_irq_domain - Create a MSI interrupt domain
  226. * @fwnode: Optional fwnode of the interrupt controller
  227. * @info: MSI domain info
  228. * @parent: Parent irq domain
  229. */
  230. struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
  231. struct msi_domain_info *info,
  232. struct irq_domain *parent)
  233. {
  234. struct irq_domain *domain;
  235. if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
  236. msi_domain_update_dom_ops(info);
  237. if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
  238. msi_domain_update_chip_ops(info);
  239. domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
  240. fwnode, &msi_domain_ops, info);
  241. if (domain && !domain->name && info->chip)
  242. domain->name = info->chip->name;
  243. return domain;
  244. }
  245. int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
  246. int nvec, msi_alloc_info_t *arg)
  247. {
  248. struct msi_domain_info *info = domain->host_data;
  249. struct msi_domain_ops *ops = info->ops;
  250. int ret;
  251. ret = ops->msi_check(domain, info, dev);
  252. if (ret == 0)
  253. ret = ops->msi_prepare(domain, dev, nvec, arg);
  254. return ret;
  255. }
/*
 * Populate already-reserved Linux interrupts [virq, virq + nvec) with
 * hierarchy allocations and attach their MSI descriptors.
 *
 * Only single-vector descriptors are supported; any descriptor without
 * an irq or with nvec_used != 1 aborts with -EINVAL. On any failure the
 * interrupts in the requested range are torn down again.
 *
 * NOTE(review): the comment below states the domain mutex must be held
 * by the caller — confirm at the call sites; it is not taken here.
 */
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		/* Skip descriptors outside the requested range */
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage: free everything in-range that was set up */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;
			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}
/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * For each MSI descriptor of @dev this allocates desc->nvec_used Linux
 * interrupts, binds the descriptor to them, and optionally activates
 * them early (MSI_FLAG_ACTIVATE_EARLY). On an activation failure all
 * previously activated interrupts are deactivated and everything is
 * freed again via msi_domain_free_irqs().
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg;
	struct msi_desc *desc;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			/* Default to -ENOSPC; let the domain refine the error */
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		/* Bind the descriptor to every allocated Linux interrupt */
		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random msi message.
		 */
		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
			struct irq_data *irq_data;

			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			ret = irq_domain_activate_irq(irq_data, true);
			if (ret)
				goto cleanup;
			/*
			 * Clear the activated state again so a later real
			 * activation is not skipped.
			 */
			if (info->flags & MSI_FLAG_MUST_REACTIVATE)
				irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	/*
	 * Deactivate every entry up to (not including) the one whose
	 * activation failed; virq still holds that entry's irq number.
	 */
	for_each_msi_entry(desc, dev) {
		struct irq_data *irqd;

		if (desc->irq == virq)
			break;

		irqd = irq_domain_get_irq_data(domain, desc->irq);
		if (irqd_is_activated(irqd))
			irq_domain_deactivate_irq(irqd);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}
  364. /**
  365. * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev
  366. * @domain: The domain to managing the interrupts
  367. * @dev: Pointer to device struct of the device for which the interrupts
  368. * are free
  369. */
  370. void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
  371. {
  372. struct msi_desc *desc;
  373. for_each_msi_entry(desc, dev) {
  374. /*
  375. * We might have failed to allocate an MSI early
  376. * enough that there is no IRQ associated to this
  377. * entry. If that's the case, don't do anything.
  378. */
  379. if (desc->irq) {
  380. irq_domain_free_irqs(desc->irq, desc->nvec_used);
  381. desc->irq = 0;
  382. }
  383. }
  384. }
  385. /**
  386. * msi_get_domain_info - Get the MSI interrupt domain info for @domain
  387. * @domain: The interrupt domain to retrieve data from
  388. *
  389. * Returns the pointer to the msi_domain_info stored in
  390. * @domain->host_data.
  391. */
  392. struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
  393. {
  394. return (struct msi_domain_info *)domain->host_data;
  395. }
  396. #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */