/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
        MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
                MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
        __aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /* Wait until all page fault handlers using the mr complete. */
        synchronize_srcu(&dev->mr_srcu);
#endif

        return err;
}
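
/*
 * The MR cache keeps pre-created, free mkeys grouped into buckets by
 * log2 page count ("order").  ent[0] holds the smallest supported order,
 * so mapping an order to its bucket index is a plain subtraction.
 */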
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;

        if (order < cache->ent[0].order)
                return 0;
        else
                return order - cache->ent[0].order;
}

static void reg_mr_callback(int status, void *context)
{
        struct mlx5_ib_mr *mr = context;
        struct mlx5_ib_dev *dev = mr->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int c = order2idx(dev, mr->order);
        struct mlx5_cache_ent *ent = &cache->ent[c];
        u8 key;
        unsigned long flags;
        struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
        int err;

        spin_lock_irqsave(&ent->lock, flags);
        ent->pending--;
        spin_unlock_irqrestore(&ent->lock, flags);
        if (status) {
                mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
                kfree(mr);
                dev->fill_delay = 1;
                mod_timer(&dev->delay_timer, jiffies + HZ);
                return;
        }

        if (mr->out.hdr.status) {
                mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
                             mr->out.hdr.status,
                             be32_to_cpu(mr->out.hdr.syndrome));
                kfree(mr);
                dev->fill_delay = 1;
                mod_timer(&dev->delay_timer, jiffies + HZ);
                return;
        }

        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
        key = dev->mdev->priv.mkey_key++;
        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
        mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

        cache->last_add = jiffies;

        spin_lock_irqsave(&ent->lock, flags);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        ent->size++;
        spin_unlock_irqrestore(&ent->lock, flags);

        write_lock_irqsave(&table->lock, flags);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
                                &mr->mmr);
        if (err)
                pr_err("Error inserting to mr tree. 0x%x\n", -err);
        write_unlock_irqrestore(&table->lock, flags);
}
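
/*
 * Asynchronously create @num new cache mkeys for bucket @c.  Creation is
 * posted to firmware with reg_mr_callback() as the completion handler;
 * at most MAX_PENDING_REG_MR commands may be outstanding per bucket at a
 * time, presumably to keep the command interface from being flooded.
 */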
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
        int err = 0;
        int i;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                if (ent->pending >= MAX_PENDING_REG_MR) {
                        err = -EAGAIN;
                        break;
                }

                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
                if (!mr) {
                        err = -ENOMEM;
                        break;
                }
                mr->order = ent->order;
                mr->umred = 1;
                mr->dev = dev;
                in->seg.status = MLX5_MKEY_STATUS_FREE;
                in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
                in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
                in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
                in->seg.log2_page_size = 12;

                spin_lock_irq(&ent->lock);
                ent->pending++;
                spin_unlock_irq(&ent->lock);
                err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
                                            sizeof(*in), reg_mr_callback,
                                            mr, &mr->out);
                if (err) {
                        spin_lock_irq(&ent->lock);
                        ent->pending--;
                        spin_unlock_irq(&ent->lock);
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
                        kfree(mr);
                        break;
                }
        }

        kfree(in);
        return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;
        int i;

        for (i = 0; i < num; i++) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                err = destroy_mkey(dev, mr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}
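
/*
 * debugfs interface to the cache: each bucket gets a directory named after
 * its order under mr_cache/ in the driver's debugfs root, with a writable
 * "size" (total mkeys, live and free) and "limit" (low-water mark), plus
 * "cur" and "miss" counters.
 */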
static ssize_t size_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var < ent->limit)
                return -EINVAL;

        if (var > ent->size) {
                do {
                        err = add_keys(dev, c, var - ent->size);
                        if (err && err != -EAGAIN)
                                return err;

                        usleep_range(3000, 5000);
                } while (err);
        } else if (var < ent->size) {
                remove_keys(dev, c, ent->size - var);
        }

        return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations size_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = size_write,
        .read   = size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var > ent->size)
                return -EINVAL;

        ent->limit = var;

        if (ent->cur < ent->limit) {
                err = add_keys(dev, c, 2 * ent->limit - ent->cur);
                if (err)
                        return err;
        }

        return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations limit_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = limit_write,
        .read   = limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
        int i;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                if (cache->ent[i].cur < cache->ent[i].limit)
                        return 1;
        }

        return 0;
}
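
/*
 * Background maintenance for one bucket.  While the bucket is below twice
 * its limit, add keys one at a time, backing off briefly on -EAGAIN and
 * for a second on a command failure.  Above twice the limit, trim one key
 * per pass and re-queue until the bucket is back at its limit, but only
 * start trimming once no bucket is still filling and the cache has been
 * idle for 300 seconds, so bursts don't cause immediate shrinking.
 */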
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
        struct mlx5_ib_dev *dev = ent->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int i = order2idx(dev, ent->order);
        int err;

        if (cache->stopped)
                return;

        ent = &dev->cache.ent[i];
        if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
                err = add_keys(dev, i, 1);
                if (ent->cur < 2 * ent->limit) {
                        if (err == -EAGAIN) {
                                mlx5_ib_dbg(dev, "returned eagain, order %d\n",
                                            i + 2);
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(3));
                        } else if (err) {
                                mlx5_ib_warn(dev, "command failed order %d, err %d\n",
                                             i + 2, err);
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(1000));
                        } else {
                                queue_work(cache->wq, &ent->work);
                        }
                }
        } else if (ent->cur > 2 * ent->limit) {
                if (!someone_adding(cache) &&
                    time_after(jiffies, cache->last_add + 300 * HZ)) {
                        remove_keys(dev, i, 1);
                        if (ent->cur > ent->limit)
                                queue_work(cache->wq, &ent->work);
                } else {
                        queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
                }
        }
}

static void delayed_cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, dwork.work);
        __cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, work);
        __cache_work_func(ent);
}
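
/*
 * Take a free mkey from the cache for a registration of the given order.
 * If the exact bucket is empty, fall through to larger buckets (a larger
 * mkey can map a smaller region), kicking each visited bucket's work item
 * so it refills; a complete miss is counted against the bucket that was
 * originally asked for.
 */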
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_ib_mr *mr = NULL;
        struct mlx5_cache_ent *ent;
        int c;
        int i;

        c = order2idx(dev, order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
                return NULL;
        }

        for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];

                mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

                spin_lock_irq(&ent->lock);
                if (!list_empty(&ent->head)) {
                        mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
                                              list);
                        list_del(&mr->list);
                        ent->cur--;
                        spin_unlock_irq(&ent->lock);
                        if (ent->cur < ent->limit)
                                queue_work(cache->wq, &ent->work);
                        break;
                }
                spin_unlock_irq(&ent->lock);

                queue_work(cache->wq, &ent->work);

                if (mr)
                        break;
        }

        if (!mr)
                cache->ent[c].miss++;

        return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int shrink = 0;
        int c;

        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
                return;
        }
        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        if (ent->cur > 2 * ent->limit)
                shrink = 1;
        spin_unlock_irq(&ent->lock);

        if (shrink)
                queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;

        cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                err = destroy_mkey(dev, mr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int i;

        if (!mlx5_debugfs_root)
                return 0;

        cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
        if (!cache->root)
                return -ENOMEM;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                sprintf(ent->name, "%d", ent->order);
                ent->dir = debugfs_create_dir(ent->name, cache->root);
                if (!ent->dir)
                        return -ENOMEM;

                ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
                                                 &size_fops);
                if (!ent->fsize)
                        return -ENOMEM;

                ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
                                                  &limit_fops);
                if (!ent->flimit)
                        return -ENOMEM;

                ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
                                               &ent->cur);
                if (!ent->fcur)
                        return -ENOMEM;

                ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
                                                &ent->miss);
                if (!ent->fmiss)
                        return -ENOMEM;
        }

        return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
        struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

        dev->fill_delay = 0;
}
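
/*
 * Set up the cache at driver load: one single-threaded workqueue shared
 * by all buckets, bucket i handling order i + 2 (so the smallest bucket
 * holds 4-page mkeys), with per-bucket limits taken from the device
 * profile when MLX5_PROF_MASK_MR_CACHE is set.  Each bucket's work item
 * is queued once here to prime it.
 */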
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int limit;
        int err;
        int i;

        cache->wq = create_singlethread_workqueue("mkey_cache");
        if (!cache->wq) {
                mlx5_ib_warn(dev, "failed to create work queue\n");
                return -ENOMEM;
        }

        setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                INIT_LIST_HEAD(&cache->ent[i].head);
                spin_lock_init(&cache->ent[i].lock);

                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
                spin_lock_init(&ent->lock);
                ent->order = i + 2;
                ent->dev = dev;

                if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
                        limit = dev->mdev->profile->mr_cache[i].limit;
                else
                        limit = 0;

                INIT_WORK(&ent->work, cache_work_func);
                INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
                ent->limit = limit;
                queue_work(cache->wq, &ent->work);
        }

        err = mlx5_mr_cache_debugfs_init(dev);
        if (err)
                mlx5_ib_warn(dev, "cache debugfs failure\n");

        return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
        int i;

        dev->cache.stopped = 1;
        flush_workqueue(dev->cache.wq);

        mlx5_mr_cache_debugfs_cleanup(dev);

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);

        destroy_workqueue(dev->cache.wq);
        del_timer_sync(&dev->delay_timer);

        return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *seg;
        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        seg = &in->seg;
        seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
        seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        seg->start_addr = 0;

        err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
                                    NULL);
        if (err)
                goto err_in;

        kfree(in);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_in:
        kfree(in);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
        u64 offset;
        int npages;

        offset = addr & (page_size - 1);
        npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
        return (npages + 1) / 2;
}

static int use_umr(int order)
{
        return order <= MLX5_MAX_UMR_SHIFT;
}
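
/*
 * Build the scatter entry and work request for a UMR registration: the
 * WQE is posted on the driver's dedicated UMR QP and points the hardware
 * at the caller's DMA-mapped array of @n page addresses, together with
 * the mkey being armed and its virtual address span and access rights.
 */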
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
                             struct ib_sge *sg, u64 dma, int n, u32 key,
                             int page_shift, u64 virt_addr, u64 len,
                             int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct ib_mr *mr = dev->umrc.mr;
        struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

        sg->addr = dma;
        sg->length = ALIGN(sizeof(u64) * n, 64);
        sg->lkey = mr->lkey;

        wr->next = NULL;
        wr->send_flags = 0;
        wr->sg_list = sg;
        if (n)
                wr->num_sge = 1;
        else
                wr->num_sge = 0;

        wr->opcode = MLX5_IB_WR_UMR;

        umrwr->npages = n;
        umrwr->page_shift = page_shift;
        umrwr->mkey = key;
        umrwr->target.virt_addr = virt_addr;
        umrwr->length = len;
        umrwr->access_flags = access_flags;
        umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
                               struct ib_send_wr *wr, u32 key)
{
        struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

        wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
        wr->opcode = MLX5_IB_WR_UMR;
        umrwr->mkey = key;
}

void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
        struct mlx5_ib_umr_context *context;
        struct ib_wc wc;
        int err;

        while (1) {
                err = ib_poll_cq(cq, 1, &wc);
                if (err < 0) {
                        pr_warn("poll cq error %d\n", err);
                        return;
                }
                if (err == 0)
                        break;

                context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
                context->status = wc.status;
                complete(&context->done);
        }
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
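
/*
 * Register a user MR through the UMR path: grab a free mkey from the
 * cache (topping the bucket up once if it is empty, then giving up with
 * -EAGAIN), write the page addresses into a 2k-aligned DMA-mapped
 * buffer, post a UMR WQE and sleep on its completion.  The umrc
 * semaphore bounds how many UMR operations may be in flight at once.
 */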
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  u64 virt_addr, u64 len, int npages,
                                  int page_shift, int order, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct ib_send_wr wr, *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
        int size;
        __be64 *mr_pas;
        __be64 *pas;
        dma_addr_t dma;
        int err = 0;
        int i;

        for (i = 0; i < 1; i++) {
                mr = alloc_cached_mr(dev, order);
                if (mr)
                        break;

                err = add_keys(dev, order2idx(dev, order), 1);
                if (err && err != -EAGAIN) {
                        mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
                        break;
                }
        }

        if (!mr)
                return ERR_PTR(-EAGAIN);

        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
         * To avoid copying garbage after the pas array, we allocate
         * a little more. */
        size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
        mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
        if (!mr_pas) {
                err = -ENOMEM;
                goto free_mr;
        }

        pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
        mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
        /* Clear padding after the actual pages. */
        memset(pas + npages, 0, size - npages * sizeof(u64));

        dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
                err = -ENOMEM;
                goto free_pas;
        }

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)&umr_context;
        prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
                         virt_addr, len, access_flags);

        mlx5_ib_init_umr_context(&umr_context);
        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &wr, &bad);
        if (err) {
                mlx5_ib_warn(dev, "post send failed, err %d\n", err);
                goto unmap_dma;
        } else {
                wait_for_completion(&umr_context.done);
                if (umr_context.status != IB_WC_SUCCESS) {
                        mlx5_ib_warn(dev, "reg umr failed\n");
                        err = -EFAULT;
                }
        }

        mr->mmr.iova = virt_addr;
        mr->mmr.size = len;
        mr->mmr.pd = to_mpd(pd)->pdn;

        mr->live = 1;

unmap_dma:
        up(&umrc->sem);
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
        kfree(mr_pas);

free_mr:
        if (err) {
                free_cached_mr(dev, mr);
                return ERR_PTR(err);
        }

        return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
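/*
 * Update a window of an ODP MR's MTTs after a page fault, or zap them on
 * invalidation.  The update is chunked: each iteration fills at most one
 * page worth of MTT entries, aligned to MLX5_UMR_MTT_ALIGNMENT, and posts
 * an MLX5_IB_SEND_UMR_UPDATE_MTT work request for that chunk.  Since this
 * runs in fault/invalidation context, the buffer is allocated with
 * GFP_ATOMIC, falling back to the static emergency buffer declared at
 * the top of this file if that fails.
 */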
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
                       int zap)
{
        struct mlx5_ib_dev *dev = mr->dev;
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct ib_umem *umem = mr->umem;
        int size;
        __be64 *pas;
        dma_addr_t dma;
        struct ib_send_wr wr, *bad;
        struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
        struct ib_sge sg;
        int err = 0;
        const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
        const int page_index_mask = page_index_alignment - 1;
        size_t pages_mapped = 0;
        size_t pages_to_map = 0;
        size_t pages_iter = 0;
        int use_emergency_buf = 0;

        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
         * so we need to align the offset and length accordingly */
        if (start_page_index & page_index_mask) {
                npages += start_page_index & page_index_mask;
                start_page_index &= ~page_index_mask;
        }

        pages_to_map = ALIGN(npages, page_index_alignment);

        if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
                return -EINVAL;

        size = sizeof(u64) * pages_to_map;
        size = min_t(int, PAGE_SIZE, size);
        /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
         * code, when we are called from an invalidation. The pas buffer must
         * be 2k-aligned for Connect-IB. */
        pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
        if (!pas) {
                mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
                pas = mlx5_ib_update_mtt_emergency_buffer;
                size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
                use_emergency_buf = 1;
                mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
                memset(pas, 0, size);
        }
        pages_iter = size / sizeof(u64);
        dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
                mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
                err = -ENOMEM;
                goto free_pas;
        }

        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, start_page_index += pages_iter) {
                dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

                npages = min_t(size_t,
                               pages_iter,
                               ib_umem_num_pages(umem) - start_page_index);

                if (!zap) {
                        __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
                                               start_page_index, npages, pas,
                                               MLX5_IB_MTT_PRESENT);
                        /* Clear padding after the pages brought from the
                         * umem. */
                        memset(pas + npages, 0, size - npages * sizeof(u64));
                }

                dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

                memset(&wr, 0, sizeof(wr));
                wr.wr_id = (u64)(unsigned long)&umr_context;

                sg.addr = dma;
                sg.length = ALIGN(npages * sizeof(u64),
                                  MLX5_UMR_MTT_ALIGNMENT);
                sg.lkey = dev->umrc.mr->lkey;

                wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
                                MLX5_IB_SEND_UMR_UPDATE_MTT;
                wr.sg_list = &sg;
                wr.num_sge = 1;
                wr.opcode = MLX5_IB_WR_UMR;
                umrwr->npages = sg.length / sizeof(u64);
                umrwr->page_shift = PAGE_SHIFT;
                umrwr->mkey = mr->mmr.key;
                umrwr->target.offset = start_page_index;

                mlx5_ib_init_umr_context(&umr_context);
                down(&umrc->sem);
                err = ib_post_send(umrc->qp, &wr, &bad);
                if (err) {
                        mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
                } else {
                        wait_for_completion(&umr_context.done);
                        if (umr_context.status != IB_WC_SUCCESS) {
                                mlx5_ib_err(dev, "UMR completion failed, code %d\n",
                                            umr_context.status);
                                err = -EFAULT;
                        }
                }
                up(&umrc->sem);
        }
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
        if (!use_emergency_buf)
                free_page((unsigned long)pas);
        else
                mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

        return err;
}
#endif
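
/*
 * Fallback registration path for regions that cannot go through UMR (or
 * when the cache cannot supply an mkey): create the mkey with a blocking
 * firmware command, passing the whole page list inline in the command
 * mailbox.
 */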
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                                     u64 length, struct ib_umem *umem,
                                     int npages, int page_shift,
                                     int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int inlen;
        int err;
        bool pg_cap = !!(dev->mdev->caps.gen.flags &
                         MLX5_DEV_CAP_FLAG_ON_DMND_PG);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_1;
        }
        mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
                             pg_cap ? MLX5_IB_MTT_PRESENT : 0);

        /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
         * in the page list submitted with the command. */
        in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;

        in->seg.flags = convert_access(access_flags) |
                MLX5_ACCESS_MODE_MTT;
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        in->seg.start_addr = cpu_to_be64(virt_addr);
        in->seg.len = cpu_to_be64(length);
        in->seg.bsfs_octo_size = 0;
        in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
        in->seg.log2_page_size = page_shift;
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
                                                         1 << page_shift));
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
                                    NULL, NULL);
        if (err) {
                mlx5_ib_warn(dev, "create mkey failed\n");
                goto err_2;
        }
        mr->umem = umem;
        mr->live = 1;
        kvfree(in);

        mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

        return mr;

err_2:
        kvfree(in);

err_1:
        kfree(mr);

        return ERR_PTR(err);
}
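
/*
 * Top-level ib_reg_user_mr() entry point.  Pin the user pages with
 * ib_umem_get(), let mlx5_ib_cont_pages() work out the best page size
 * for the region, then prefer the cached/UMR path for orders small
 * enough for UMR and fall back to reg_create() otherwise.  ODP regions
 * above the UMR size limit are rejected.
 */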
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
        struct ib_umem *umem;
        int page_shift;
        int npages;
        int ncont;
        int order;
        int err;

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);
        umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
                           0);
        if (IS_ERR(umem)) {
                mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
                return (void *)umem;
        }

        mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
        if (!npages) {
                mlx5_ib_warn(dev, "avoid zero region\n");
                err = -EINVAL;
                goto error;
        }

        mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                    npages, ncont, order, page_shift);

        if (use_umr(order)) {
                mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
                             order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d", order);
                        mr = NULL;
                }
        } else if (access_flags & IB_ACCESS_ON_DEMAND) {
                err = -EINVAL;
                pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
                goto error;
        }

        if (!mr)
                mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
                                access_flags);

        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);
                goto error;
        }

        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

        mr->umem = umem;
        mr->npages = npages;
        atomic_add(npages, &dev->mdev->priv.reg_pages);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (umem->odp_data) {
                /*
                 * This barrier prevents the compiler from moving the
                 * setting of umem->odp_data->private to point to our
                 * MR, before reg_umr finished, to ensure that the MR
                 * initialization has finished before starting to
                 * handle invalidations.
                 */
                smp_wmb();
                mr->umem->odp_data->private = mr;
                /*
                 * Make sure we will see the new
                 * umem->odp_data->private value in the invalidation
                 * routines, before we can get page faults on the
                 * MR. Page faults can happen once we put the MR in
                 * the tree, below this line. Without the barrier,
                 * there can be a fault handling and an invalidation
                 * before umem->odp_data->private == mr is visible to
                 * the invalidation handler.
                 */
                smp_wmb();
        }
#endif

        return &mr->ibmr;

error:
        /*
         * Destroy the umem *before* destroying the MR, to ensure we
         * will not have any in-flight notifiers when destroying the
         * MR.
         *
         * As the MR is completely invalid to begin with, and this
         * error path is only taken if we can't push the mr entry into
         * the pagefault tree, this is safe.
         */
        ib_umem_release(umem);
        /* Kill the MR, and return an error code. */
        clean_mr(mr);
        return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct ib_send_wr wr, *bad;
        int err;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)&umr_context;
        prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

        mlx5_ib_init_umr_context(&umr_context);
        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &wr, &bad);
        if (err) {
                up(&umrc->sem);
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto error;
        } else {
                wait_for_completion(&umr_context.done);
                up(&umrc->sem);
        }
        if (umr_context.status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "unreg umr failed\n");
                err = -EFAULT;
                goto error;
        }
        return 0;

error:
        return err;
}

static int clean_mr(struct mlx5_ib_mr *mr)
{
        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
        int umred = mr->umred;
        int err;

        if (!umred) {
                err = destroy_mkey(dev, mr);
                if (err) {
                        mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                                     mr->mmr.key, err);
                        return err;
                }
        } else {
                err = unreg_umr(dev, mr);
                if (err) {
                        mlx5_ib_warn(dev, "failed unregister\n");
                        return err;
                }
                free_cached_mr(dev, mr);
        }

        if (!umred)
                kfree(mr);

        return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int npages = mr->npages;
        struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (umem && umem->odp_data) {
                /* Prevent new page faults from succeeding */
                mr->live = 0;
                /* Wait for all running page-fault handlers to finish. */
                synchronize_srcu(&dev->mr_srcu);
                /* Destroy all page mappings */
                mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
                                         ib_umem_end(umem));
                /*
                 * We kill the umem before the MR for ODP,
                 * so that there will not be any invalidations in
                 * flight, looking at the *mr struct.
                 */
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);

                /* Avoid double-freeing the umem. */
                umem = NULL;
        }
#endif

        clean_mr(mr);

        if (umem) {
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);
        }

        return 0;
}
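
/*
 * Create a non-userspace MR, used e.g. for signature-enabled MRs.  For
 * IB_MR_SIGNATURE_EN, allocate two PSV objects (one for the memory
 * domain and one for the wire domain), enable the BSF on the mkey and
 * switch it to KLM access mode.
 */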
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
                                struct ib_mr_init_attr *mr_init_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int access_mode, err;
        int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        in->seg.status = MLX5_MKEY_STATUS_FREE;
        in->seg.xlt_oct_size = cpu_to_be32(ndescs);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        access_mode = MLX5_ACCESS_MODE_MTT;

        if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
                u32 psv_index[2];

                in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
                                               MLX5_MKEY_BSF_EN);
                in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
                mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
                if (!mr->sig) {
                        err = -ENOMEM;
                        goto err_free_in;
                }

                /* create mem & wire PSVs */
                err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
                                           2, psv_index);
                if (err)
                        goto err_free_sig;

                access_mode = MLX5_ACCESS_MODE_KLM;
                mr->sig->psv_memory.psv_idx = psv_index[0];
                mr->sig->psv_wire.psv_idx = psv_index[1];

                mr->sig->sig_status_checked = true;
                mr->sig->sig_err_exists = false;
                /* Next UMR, Arm SIGERR */
                ++mr->sig->sigerr_count;
        }

        in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
                                    NULL, NULL, NULL);
        if (err)
                goto err_destroy_psv;

        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;
        kfree(in);

        return &mr->ibmr;

err_destroy_psv:
        if (mr->sig) {
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
        }
err_free_sig:
        kfree(mr->sig);
err_free_in:
        kfree(in);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int err;

        if (mr->sig) {
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
                kfree(mr->sig);
        }

        err = destroy_mkey(dev, mr);
        if (err) {
                mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                             mr->mmr.key, err);
                return err;
        }

        kfree(mr);

        return err;
}

struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                                        int max_page_list_len)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        in->seg.status = MLX5_MKEY_STATUS_FREE;
        in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        /* TBD not needed - issue 197292 */
        in->seg.log2_page_size = PAGE_SHIFT;

        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
                                    NULL, NULL);
        kfree(in);
        if (err)
                goto err_free;

        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free:
        kfree(mr);
        return ERR_PTR(err);
}
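
/*
 * Fast-register page lists: alongside the kernel-visible copy, the copy
 * handed to hardware is allocated with dma_alloc_coherent() and must be
 * 64-byte aligned, hence the WARN_ON on the low six address bits below.
 */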
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
                                                               int page_list_len)
{
        struct mlx5_ib_fast_reg_page_list *mfrpl;
        int size = page_list_len * sizeof(u64);

        mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
        if (!mfrpl)
                return ERR_PTR(-ENOMEM);

        mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
        if (!mfrpl->ibfrpl.page_list)
                goto err_free;

        mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
                                                     size, &mfrpl->map,
                                                     GFP_KERNEL);
        if (!mfrpl->mapped_page_list)
                goto err_free;

        WARN_ON(mfrpl->map & 0x3f);

        return &mfrpl->ibfrpl;

err_free:
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
        return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
        struct mlx5_ib_dev *dev = to_mdev(page_list->device);
        int size = page_list->max_page_list_len * sizeof(u64);

        dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status)
{
        struct mlx5_ib_mr *mmr = to_mmr(ibmr);
        int ret = 0;

        if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
                pr_err("Invalid status check mask\n");
                ret = -EINVAL;
                goto done;
        }

        mr_status->fail_status = 0;
        if (check_mask & IB_MR_CHECK_SIG_STATUS) {
                if (!mmr->sig) {
                        ret = -EINVAL;
                        pr_err("signature status check requested on a non-signature enabled MR\n");
                        goto done;
                }

                mmr->sig->sig_status_checked = true;
                if (!mmr->sig->sig_err_exists)
                        goto done;

                if (ibmr->lkey == mmr->sig->err_item.key)
                        memcpy(&mr_status->sig_err, &mmr->sig->err_item,
                               sizeof(mr_status->sig_err));
                else {
                        mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
                        mr_status->sig_err.sig_err_offset = 0;
                        mr_status->sig_err.key = mmr->sig->err_item.key;
                }

                mmr->sig->sig_err_exists = false;
                mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
        }

done:
        return ret;
}