affinity.c

/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;
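
/* Reset a CPU mask set: clear both the full and 'used' masks and the generation */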
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask. Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1. Skip over the first N HT siblings and use them as the
	 * "real" cores. Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}
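
/*
 * Set up the global affinity state: initialize the process CPU mask set
 * from the online CPUs, build the non-HT "real" CPU mask, record
 * core-sibling and NUMA node counts, and count the HFI devices present on
 * each NUMA node by walking the PCI device table.
 */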
int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
				cpumask_weight(topology_sibling_cpumask(
					cpumask_first(&node_affinity.proc.mask)
					));
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				node = numa_node_id();

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;
}
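
/*
 * Tear down the global affinity state: free every per-node entry on the
 * affinity list and release the per-node device counters.
 */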
void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	spin_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}
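
/*
 * Allocate and initialize a per-NUMA-node affinity entry. The caller is
 * responsible for adding it to node_affinity.list.
 */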
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * Append an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* Must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}
	return NULL;
}

/*
 * Interrupt affinity.
 *
 * Non-receive interrupts get a default mask that starts as the node's
 * non-HT CPUs; the first CPU is then carved out for the general/control
 * context and the following CPUs for the kernel receive contexts.
 *
 * Receive interrupts get the node-relative CPUs that follow the
 * general/control CPU, wrapping back as necessary.
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		spin_lock(&node_affinity.lock);
		node_affinity_add_tail(entry);
		spin_unlock(&node_affinity.lock);
	}

	return 0;
}
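
/*
 * Pick a CPU for an MSI-X vector based on its IRQ type and set the
 * affinity hint for that vector. SDMA and receive-context interrupts are
 * spread round-robin over their per-node mask sets; the general interrupt
 * and the control context always land on the first general-interrupt CPU.
 */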
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		/* don't leak the scratch cpumask on the error path */
		free_cpumask_var(diff);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		spin_lock(&node_affinity.lock);
		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);
		spin_unlock(&node_affinity.lock);
	}

	switch (msix->type) {
	case IRQ_SDMA:
		sde->cpu = cpu;
		break;
	case IRQ_GENERAL:
	case IRQ_RCVCTXT:
	case IRQ_OTHER:
		break;
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
		    msix->msix.vector, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->msix.vector, &msix->mask);
	free_cpumask_var(diff);
	return 0;
}
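
/*
 * Release the CPU previously assigned to an MSI-X vector: clear it from
 * the relevant mask set's 'used' map (general and control vectors are not
 * accounted) and drop the affinity hint.
 */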
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		return;
	}

	if (set) {
		spin_lock(&node_affinity.lock);
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
		spin_unlock(&node_affinity.lock);
	}

	irq_set_affinity_hint(msix->msix.vector, NULL);
	cpumask_clear(&msix->mask);
}
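
/*
 * Build the CPU mask for hardware thread number 'hw_thread_no': keep one
 * CPU per physical core (the first block of CPUs across all online NUMA
 * nodes), then shift the mask so that it covers the requested sibling
 * thread within each core.
 */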
/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}
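
/*
 * Recommend a CPU for a user process opening a context on the device on
 * NUMA node 'node'. Returns the chosen CPU id, or -1 if the process has
 * its own multi-CPU affinity or no CPU could be selected. The chosen CPU
 * is accounted as used until hfi1_put_proc_affinity() returns it.
 */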
int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = tsk_cpus_allowed(current);
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (First set of HT
	 *     cores on all physical cores, then second set of HT core,
	 *     and, so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores from the bitmask.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	spin_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	/*
	 * If NUMA node has CPUs used by interrupt handlers, include them in the
	 * interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will always be not empty at least once in this
			 * loop as the used mask gets reset when
			 * (set->mask == set->used) before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	spin_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}
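
/*
 * Return a CPU previously handed out by hfi1_get_proc_affinity() to the
 * pool of CPUs available for process placement.
 */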
void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;
	spin_lock(&affinity->lock);
	cpumask_clear_cpu(cpu, &set->used);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	spin_unlock(&affinity->lock);
}

/* Prevents concurrent reads and writes of the sdma_affinity attribute */
static DEFINE_MUTEX(sdma_affinity_mutex);
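
/*
 * Set the SDMA interrupt affinity for a device from a user-supplied CPU
 * list (the sdma_affinity attribute): validate the list against the
 * online CPUs, reset the default interrupt mask set, and reassign every
 * SDMA MSI-X vector.
 */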
int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
			   size_t count)
{
	struct hfi1_affinity_node *entry;
	struct cpumask mask;
	int ret, i;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	if (!entry)
		return -EINVAL;

	ret = cpulist_parse(buf, &mask);
	if (ret)
		return ret;

	if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
		dd_dev_warn(dd, "Invalid CPU mask\n");
		return -EINVAL;
	}

	mutex_lock(&sdma_affinity_mutex);
	/* reset the SDMA interrupt affinity details */
	init_cpu_mask_set(&entry->def_intr);
	cpumask_copy(&entry->def_intr.mask, &mask);
	/*
	 * Reassign the affinity for each SDMA interrupt.
	 */
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *msix;

		msix = &dd->msix_entries[i];
		if (msix->type != IRQ_SDMA)
			continue;

		ret = hfi1_get_irq_affinity(dd, msix);

		if (ret)
			break;
	}

	mutex_unlock(&sdma_affinity_mutex);
	return ret ? ret : strnlen(buf, PAGE_SIZE);
}
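
/*
 * Print the current SDMA interrupt CPU mask for a device into 'buf' as a
 * human-readable CPU list.
 */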
int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
{
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	if (!entry)
		return -EINVAL;

	mutex_lock(&sdma_affinity_mutex);
	cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
	mutex_unlock(&sdma_affinity_mutex);
	return strnlen(buf, PAGE_SIZE);
}