cache.c

/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
        int table_len;
        u16 table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device *device;
        u8 port_num;
        bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);
enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID      = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV   = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT  = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
};

struct ib_gid_table_entry {
        unsigned long props;
        union ib_gid gid;
        struct ib_gid_attr attr;
        void *context;
};
struct ib_gid_table {
        int sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Check if this GID already exists.
         * (b) Find a free slot.
         * (c) Write the new GID.
         *
         * Deletion requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         */
        /* Any writer to data_vec must hold this lock and the write side of
         * rwlock. Readers must hold only rwlock. All writers must be in a
         * sleepable context.
         */
        struct mutex lock;
        /* rwlock protects data_vec[ix]->props. */
        rwlock_t rwlock;
        struct ib_gid_table_entry *data_vec;
};
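
/*
 * Illustrative sketch (not part of the original file): writers in this
 * file follow the two-level locking scheme described above, e.g.
 *
 *      mutex_lock(&table->lock);          // serialize writers (sleepable)
 *      write_lock_irq(&table->rwlock);    // exclude readers of props
 *      table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
 *      write_unlock_irq(&table->rwlock);
 *      mutex_unlock(&table->lock);
 *
 * Readers (e.g. callers of find_gid() below) take only
 * read_lock_irqsave(&table->rwlock, flags).
 */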
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        struct ib_event event;

        event.device = ib_dev;
        event.element.port_num = port;
        event.event = IB_EVENT_GID_CHANGE;

        ib_dispatch_event(&event);
}

static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]             = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
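
/*
 * Illustrative example (not part of the original file): the two helpers
 * above round-trip GID type names, e.g. for a sysfs store/show pair.
 * ib_cache_gid_parse_type_str("RoCE v2\n") returns
 * IB_GID_TYPE_ROCE_UDP_ENCAP (the trailing newline is ignored before
 * comparison), and ib_cache_gid_type_str(IB_GID_TYPE_ROCE_UDP_ENCAP)
 * maps it back to "RoCE v2". A string matching no entry of
 * gid_type_str[] yields -EINVAL.
 */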
static void del_roce_gid(struct ib_device *device, u8 port_num,
                         struct ib_gid_table *table, int ix)
{
        pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
                 device->name, port_num, ix,
                 table->data_vec[ix].gid.raw);

        if (rdma_cap_roce_gid_table(device, port_num))
                device->del_gid(&table->data_vec[ix].attr,
                                &table->data_vec[ix].context);
        dev_put(table->data_vec[ix].attr.ndev);
}
static int add_roce_gid(struct ib_gid_table *table,
                        const union ib_gid *gid,
                        const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;
        int ix = attr->index;
        int ret = 0;

        if (!attr->ndev) {
                pr_err("%s NULL netdev device=%s port=%d index=%d\n",
                       __func__, attr->device->name, attr->port_num,
                       attr->index);
                return -EINVAL;
        }

        entry = &table->data_vec[ix];
        if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) {
                WARN(1, "GID table corruption device=%s port=%d index=%d\n",
                     attr->device->name, attr->port_num,
                     attr->index);
                return -EINVAL;
        }

        if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
                ret = attr->device->add_gid(gid, attr, &entry->context);
                if (ret) {
                        pr_err("%s GID add failed device=%s port=%d index=%d\n",
                               __func__, attr->device->name, attr->port_num,
                               attr->index);
                        goto add_err;
                }
        }
        dev_hold(attr->ndev);

add_err:
        /* Reached by fallthrough on success and by goto on failure. */
        if (!ret)
                pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
                         attr->device->name, attr->port_num, ix, gid->raw);
        return ret;
}
/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table: GID table in which the GID is to be added or modified
 * @gid:   GID content
 * @attr:  Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports on HCAs that report zero GIDs as
 * valid entries. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
                          const union ib_gid *gid,
                          const struct ib_gid_attr *attr)
{
        int ret;

        if (rdma_protocol_roce(attr->device, attr->port_num)) {
                ret = add_roce_gid(table, gid, attr);
                if (ret)
                        return ret;
        } else {
                /*
                 * Some HCAs report multiple GID entries with only one
                 * valid GID, leaving the remaining entries as zero GIDs.
                 * So ignore such behavior for the IB link layer and don't
                 * fail the call, but don't add such entries to the GID
                 * cache.
                 */
                if (!memcmp(gid, &zgid, sizeof(*gid)))
                        return 0;
        }

        lockdep_assert_held(&table->lock);
        memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr));

        write_lock_irq(&table->rwlock);
        table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID;
        write_unlock_irq(&table->rwlock);
        return 0;
}
/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev: IB device whose GID entry is to be deleted
 * @port:   Port number of the IB device
 * @table:  GID table of the IB device for a port
 * @ix:     GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
                    struct ib_gid_table *table, int ix)
{
        lockdep_assert_held(&table->lock);

        /* Mark the entry invalid for readers before tearing it down. */
        write_lock_irq(&table->rwlock);
        table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
        write_unlock_irq(&table->rwlock);

        if (rdma_protocol_roce(ib_dev, port))
                del_roce_gid(ib_dev, port, table, ix);
        memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid));
        memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
        table->data_vec[ix].context = NULL;
}
/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                /* find_gid() is used during GID addition, where it is
                 * expected to return a free entry slot that is not a
                 * duplicate. A free entry slot is requested and returned
                 * if pempty is set, so look up a free slot only when
                 * requested.
                 */
                if (pempty && empty < 0) {
                        if (data->props & GID_TABLE_ENTRY_INVALID &&
                            (default_gid ==
                             !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
                                /*
                                 * Found an invalid (free) entry; allocate it.
                                 * If a default GID is requested, then our
                                 * found slot must be one of the DEFAULT
                                 * reserved slots or we fail.
                                 * This ensures that only DEFAULT reserved
                                 * slots are used for default property GIDs.
                                 */
                                empty = curr_index;
                        }
                }

                /*
                 * Additionally, find_gid() is used to find a valid entry
                 * during lookup operation, where validity needs to be
                 * checked. So find the empty entry first to continue the
                 * search for a free slot and ignore its INVALID flag.
                 */
                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                              union ib_gid *gid, struct ib_gid_attr *attr,
                              unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int empty;
        int ix;

        /* Do not allow adding zero GID in support of
         * IB spec version 1.3 section 4.1.1 point (6) and
         * section 12.7.10 and section 12.7.20
         */
        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }
        attr->device = ib_dev;
        attr->index = empty;
        attr->port_num = port;
        ret = add_modify_gid(table, gid, attr);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_warn("%s: unable to add gid %pI6 error=%d\n",
                        __func__, gid->raw, ret);
        return ret;
}
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct net_device *idev;
        unsigned long mask;
        int ret;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mask = GID_ATTR_FIND_MASK_GID |
               GID_ATTR_FIND_MASK_GID_TYPE |
               GID_ATTR_FIND_MASK_NETDEV;

        ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
        return ret;
}
static int
_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                  union ib_gid *gid, struct ib_gid_attr *attr,
                  unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int ix;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, NULL);
        if (ix < 0) {
                ret = -EINVAL;
                goto out_unlock;
        }

        del_gid(ib_dev, port, table, ix);
        dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_debug("%s: can't delete gid %pI6 error=%d\n",
                         __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE |
                             GID_ATTR_FIND_MASK_DEFAULT |
                             GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++) {
                if (table->data_vec[ix].attr.ndev == ndev) {
                        del_gid(ib_dev, port, table, ix);
                        deleted = true;
                }
        }

        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ib_dev->cache.ports[p].gid;
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}
/**
 * ib_find_cached_gid_by_port - Returns the GID table index where a specified
 *   GID value occurs. It searches for the specified GID value in the local
 *   software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return -ENOENT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
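
/*
 * Illustrative example (not part of the original file): a hypothetical
 * caller resolving the table index of a known RoCE v2 GID on port 1
 * might do:
 *
 *      u16 index;
 *
 *      if (!ib_find_cached_gid_by_port(ib_dev, &gid,
 *                                      IB_GID_TYPE_ROCE_UDP_ENCAP,
 *                                      1, ndev, &index))
 *              ;       // use index
 *
 * Passing ndev == NULL skips the netdev comparison in find_gid().
 */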
/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 *   GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data passed into the filter function.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!rdma_is_port_valid(ib_dev, port) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        continue;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context)) {
                        found = true;
                        if (index)
                                *index = i;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;
        return 0;
}
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
        int i;

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        /* Mark all entries as invalid so that the allocator can hand out
         * one of the invalid (free) entries.
         */
        for (i = 0; i < sz; i++)
                table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID;
        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        mutex_lock(&table->lock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid))) {
                        del_gid(ib_dev, port, table, i);
                        deleted = true;
                }
        }
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid = { };
        struct ib_gid_attr gid_attr;
        struct ib_gid_table *table;
        unsigned int gid_type;
        unsigned long mask;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mask = GID_ATTR_FIND_MASK_GID_TYPE |
               GID_ATTR_FIND_MASK_DEFAULT |
               GID_ATTR_FIND_MASK_NETDEV;
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        make_default_gid(ndev, &gid);
                        __ib_cache_gid_add(ib_dev, port, &gid,
                                           &gid_attr, mask, true);
                } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
                        _ib_cache_gid_del(ib_dev, port, &gid,
                                          &gid_attr, mask, true);
                }
        }
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry = &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table *table;
        int err = 0;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table = alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table);
                if (err)
                        goto rollback_table_setup;
                ib_dev->cache.ports[port].gid = table;
        }

        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;

                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
                release_gid_table(table);
        }

        return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                release_gid_table(table);
                ib_dev->cache.ports[port].gid = NULL;
        }
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
        }
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        rdma_roce_rescan_device(ib_dev);

        return err;
}
int ib_get_cached_gid(struct ib_device *device,
                      u8 port_num,
                      int index,
                      union ib_gid *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table *table;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = device->cache.ports[port_num - rdma_start_port(device)].gid;
        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
/**
 * ib_find_cached_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_find_cached_gid() searches for the specified GID value in
 * the local software cache.
 */
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8 *port_num,
                       u16 *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only the RoCE GID table supports a filter function */
        if (!rdma_protocol_roce(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
int ib_get_cached_pkey(struct ib_device *device,
                       u8 port_num,
                       int index,
                       u16 *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
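
/*
 * Illustrative example (not part of the original file): callers commonly
 * fetch the partition key at index 0:
 *
 *      u16 pkey;
 *
 *      if (!ib_get_cached_pkey(device, port_num, 0, &pkey))
 *              ;       // pkey now holds the cached P_Key at index 0
 *
 * The read is served entirely from the software cache under cache.lock;
 * no MAD query is issued on this path.
 */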
int ib_get_cached_subnet_prefix(struct ib_device *device,
                                u8 port_num,
                                u64 *sn_pfx)
{
        unsigned long flags;
        int p;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        p = port_num - rdma_start_port(device);
        read_lock_irqsave(&device->cache.lock, flags);
        *sn_pfx = device->cache.ports[p].subnet_prefix;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
int ib_find_cached_pkey(struct ib_device *device,
                        u8 port_num,
                        u16 pkey,
                        u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                /* Full-member P_Key: done. */
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                /* Limited-member match: remember it. */
                                partial_ix = i;
                }

        /* Fall back to a limited-member match if no full member was found. */
        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
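
/*
 * Illustrative note (not part of the original file): bit 15 of a P_Key is
 * the full-membership bit and the low 15 bits are the key itself. For
 * example, 0xffff (the full-member default key) and 0x7fff (its
 * limited-member twin) compare equal under the 0x7fff mask, so
 * ib_find_cached_pkey(device, port_num, 0x7fff, &index) prefers a cached
 * 0xffff entry and falls back to 0x7fff only when no full-member entry
 * exists.
 */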
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8 port_num,
                              u16 pkey,
                              u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
                      u8 port_num,
                      u8 *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device,
                             u8 port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->cache.ports[port_num
                - rdma_start_port(device)].port_state;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);
static int config_non_roce_gid_cache(struct ib_device *device,
                                     u8 port, int gid_tbl_len)
{
        struct ib_gid_attr gid_attr = {};
        struct ib_gid_table *table;
        union ib_gid gid;
        int ret = 0;
        int i;

        gid_attr.device = device;
        gid_attr.port_num = port;
        table = device->cache.ports[port - rdma_start_port(device)].gid;

        mutex_lock(&table->lock);
        for (i = 0; i < gid_tbl_len; ++i) {
                if (!device->query_gid)
                        continue;
                ret = device->query_gid(device, port, i, &gid);
                if (ret) {
                        pr_warn("query_gid failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
                gid_attr.index = i;
                add_modify_gid(table, &gid, &gid_attr);
        }
err:
        mutex_unlock(&table->lock);
        return ret;
}
static void ib_cache_update(struct ib_device *device,
                            u8 port,
                            bool enforce_security)
{
        struct ib_port_attr *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        int i;
        int ret;
        struct ib_gid_table *table;

        if (!rdma_is_port_valid(device, port))
                return;

        table = device->cache.ports[port - rdma_start_port(device)].gid;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        if (!rdma_protocol_roce(device, port)) {
                ret = config_non_roce_gid_cache(device, port,
                                                tprops->gid_tbl_len);
                if (ret)
                        goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.ports[port -
                rdma_start_port(device)].pkey;

        device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
        device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
        device->cache.ports[port - rdma_start_port(device)].port_state =
                tprops->state;

        device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
                tprops->subnet_prefix;
        write_unlock_irq(&device->cache.lock);

        if (enforce_security)
                ib_security_cache_change(device,
                                         port,
                                         tprops->subnet_prefix);

        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device,
                        work->port_num,
                        work->enforce_security);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device = event->device;
                        work->port_num = event->element.port_num;
                        if (event->event == IB_EVENT_PKEY_CHANGE ||
                            event->event == IB_EVENT_GID_CHANGE)
                                work->enforce_security = true;
                        else
                                work->enforce_security = false;

                        queue_work(ib_wq, &work->work);
                }
        }
}
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.ports =
                kzalloc(sizeof(*device->cache.ports) *
                        (rdma_end_port(device) - rdma_start_port(device) + 1),
                        GFP_KERNEL);
        if (!device->cache.ports)
                return -ENOMEM;

        err = gid_table_setup_one(device);
        if (err) {
                kfree(device->cache.ports);
                device->cache.ports = NULL;
                return err;
        }

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device), true);

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        ib_register_event_handler(&device->cache.event_handler);
        return 0;
}
void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources, when the cache can no
         * longer be accessed.
         */
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                kfree(device->cache.ports[p].pkey);

        gid_table_release_one(device);
        kfree(device->cache.ports);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}