cache.c

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
        int  table_len;
        u16  table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};
union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID      = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV   = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT  = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};
struct ib_gid_table_entry {
        unsigned long      props;
        union ib_gid       gid;
        struct ib_gid_attr attr;
        void              *context;
};

struct ib_gid_table {
        int sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find if this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Deletion requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete must be carried out atomically, so writers
         * serialize on this mutex. We don't need this lock for IB,
         * as the MAD layer replaces all entries at once. All
         * data_vec entries are protected by this lock.
         */
        struct mutex lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t rwlock;
        struct ib_gid_table_entry *data_vec;
};
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device           = ib_dev;
                event.element.port_num = port;
                event.event            = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}
static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]             = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
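
/*
 * Inverse of ib_cache_gid_type_str(): map a user-supplied string back to
 * its enum ib_gid_type value, ignoring one trailing newline (e.g. from a
 * sysfs-style write buffer). Illustrative (a sketch, not part of this
 * file): ib_cache_gid_parse_type_str("RoCE v2\n") returns
 * IB_GID_TYPE_ROCE_UDP_ENCAP, while an unknown string returns -EINVAL.
 */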
int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;
        enum ib_gid_type old_gid_type;

        /* When rdma_cap_roce_gid_table() is true, this function must be
         * protected by a sleepable lock.
         */
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }

        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (default_gid) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
                if (action == GID_TABLE_WRITE_ACTION_DEL)
                        table->data_vec[ix].attr.gid_type = old_gid_type;
        }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}
static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
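
/*
 * Scan the table for the first entry matching every attribute selected
 * in @mask and return its index, or -1 if none matches. When @pempty is
 * non-NULL, also report (through it) the index of the first completely
 * free slot, so callers such as ib_cache_gid_add() can locate a match
 * and an insertion point in a single pass.
 */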
/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}
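
/*
 * Build the default RoCE GID for a netdev: the IPv6 link-local prefix
 * fe80::/64 plus an EUI-64 interface ID derived from the device's MAC
 * address by addrconf_ifid_eui48().
 */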
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;
        int empty;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix,
                                     !!(table->data_vec[ix].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}
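
/*
 * Read one cache entry. -EAGAIN means the entry is temporarily marked
 * GID_TABLE_ENTRY_INVALID because write_gid() dropped the rwlock while
 * a RoCE provider callback is rewriting it; callers may simply retry.
 */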
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the
 *   table. If the filter function returns true, the corresponding index
 *   is returned, otherwise, we continue searching the GID table. It's
 *   guaranteed that while filter is executed, ndev field is valid and
 *   the structure won't change. filter is executed in an atomic context.
 *   filter must not be NULL.
 * @context: Private data passed to the filter callback.
 * @index: The index into the cached GID table where the GID was found.
 *   This parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!ports_table)
                return -EOPNOTSUPP;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        goto next;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        goto next;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context))
                        found = true;

next:
                if (found)
                        break;
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;

        if (index)
                *index = i;
        return 0;
}
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        if (!del_gid(ib_dev, port, table, i,
                                     !!(table->data_vec[i].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}
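
/*
 * Install (or, in delete mode, clear) the default GID derived from
 * @ndev in every reserved default slot whose GID type is selected in
 * @gid_type_mask. Used by the RoCE GID management code when the
 * underlying netdev appears, changes, or goes away.
 */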
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        } else {
                                dispatch_gid_change_event(ib_dev, port);
                        }
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr,
                                    true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}
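
/*
 * Reserve one slot at the start of the table per RoCE GID type the
 * port supports, marking each slot as a default entry and recording
 * which type it will hold by walking the set bits of the type mask.
 */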
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry =
                        &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        err = roce_rescan_device(ib_dev);
        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}
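
/*
 * Illustrative use (a sketch, not part of this file): a ULP that wants
 * GID 0 of port 1 without a MAD round-trip to the hardware could call
 *
 *      union ib_gid sgid;
 *      ret = ib_get_cached_gid(device, 1, 0, &sgid, NULL);
 *
 * For ports where the core manages the RoCE GID table, the cache
 * itself serves as the authoritative table.
 */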
int ib_get_cached_gid(struct ib_device *device,
                      u8 port_num,
                      int index,
                      union ib_gid *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table **ports_table = device->cache.gid_cache;
        struct ib_gid_table *table;

        /* Validate the port before using it to index ports_table */
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        table = ports_table[port_num - rdma_start_port(device)];
        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8 *port_num,
                       u16 *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only RoCE GID table supports filter function */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
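
/*
 * Illustrative filter callback (a sketch, not part of this file):
 * locate the index of a GID only if it is stored as a RoCE v2 entry.
 *
 *      static bool match_roce_v2(const union ib_gid *gid,
 *                                const struct ib_gid_attr *attr,
 *                                void *context)
 *      {
 *              return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 *      }
 *
 *      err = ib_find_gid_by_filter(device, gid, port_num,
 *                                  match_roce_v2, NULL, &index);
 *
 * The callback runs under the table's rwlock, so it must not sleep.
 */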
int ib_get_cached_pkey(struct ib_device *device,
                       u8 port_num,
                       int index,
                       u16 *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
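
/*
 * P_Key matching note: bits 0-14 hold the key itself and bit 15 is the
 * membership bit (1 = full member). The search below compares only the
 * low 15 bits, prefers a full-member entry, and falls back to the first
 * limited-member match when no full-member entry exists.
 */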
int ib_find_cached_pkey(struct ib_device *device,
                        u8 port_num,
                        u16 pkey,
                        u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8 port_num,
                              u16 pkey,
                              u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
                      u8 port_num,
                      u8 *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
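
/*
 * Rebuild the cached state for one port: query the port attributes,
 * re-read the whole P_Key table, and, for ports whose GID table is not
 * managed by the RoCE code, re-read every GID via ib_query_gid(). The
 * new tables are swapped in under cache.lock so readers never observe
 * a half-updated cache.
 */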
static void ib_cache_update(struct ib_device *device,
                            u8 port)
{
        struct ib_port_attr  *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int          table_len;
                union ib_gid table[0];
        } *gid_cache = NULL;
        int i;
        int ret;
        struct ib_gid_table *table;
        struct ib_gid_table **ports_table = device->cache.gid_cache;
        bool use_roce_gid_table =
                rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}
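
/*
 * Event handlers may run in atomic context, which is why the handler
 * above allocates with GFP_ATOMIC and defers the (sleeping) cache
 * update to ib_wq rather than calling ib_cache_update() directly.
 */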
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1),
                        GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                pr_warn("Couldn't allocate cache for %s\n", device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}
void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}