/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
	int table_len;
	u16 table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device *device;
	u8 port_num;
	bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};
struct ib_gid_table_entry {
	unsigned long props;
	union ib_gid gid;
	struct ib_gid_attr attr;
	void *context;
};

struct ib_gid_table {
	int sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Check whether this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex lock;
	/* rwlock protects data_vec[ix]->props. */
	rwlock_t rwlock;
	struct ib_gid_table_entry *data_vec;
};
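
/*
 * To make the locking rule above concrete, here is a minimal sketch of
 * the writer-side discipline that add_modify_gid()/del_gid() follow.
 * This is an illustrative fragment, not part of the driver: "table" and
 * "ix" are assumed to be a valid ib_gid_table and entry index.
 */
static inline void example_gid_entry_update(struct ib_gid_table *table, int ix)
{
	mutex_lock(&table->lock);	/* serialize writers; sleepable */
	/* ... modify data_vec[ix].gid and data_vec[ix].attr here ... */
	write_lock_irq(&table->rwlock);	/* exclude readers of ->props */
	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
}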
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event(&event);
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
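
/*
 * Illustrative only, not driver code: ib_cache_gid_parse_type_str()
 * maps a (possibly newline-terminated) sysfs-style string back to an
 * ib_gid_type value.
 */
static inline int example_parse_gid_type(void)
{
	int t = ib_cache_gid_parse_type_str("RoCE v2\n");

	/* t == IB_GID_TYPE_ROCE_UDP_ENCAP on success, -EINVAL otherwise */
	return t;
}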
static void del_roce_gid(struct ib_device *device, u8 port_num,
			 struct ib_gid_table *table, int ix)
{
	pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
		 device->name, port_num, ix,
		 table->data_vec[ix].gid.raw);

	if (rdma_cap_roce_gid_table(device, port_num))
		device->del_gid(&table->data_vec[ix].attr,
				&table->data_vec[ix].context);
	dev_put(table->data_vec[ix].attr.ndev);
}

static int add_roce_gid(struct ib_gid_table *table,
			const union ib_gid *gid,
			const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ix = attr->index;
	int ret = 0;

	if (!attr->ndev) {
		pr_err("%s NULL netdev device=%s port=%d index=%d\n",
		       __func__, attr->device->name, attr->port_num,
		       attr->index);
		return -EINVAL;
	}

	entry = &table->data_vec[ix];
	if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) {
		WARN(1, "GID table corruption device=%s port=%d index=%d\n",
		     attr->device->name, attr->port_num,
		     attr->index);
		return -EINVAL;
	}

	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->add_gid(gid, attr, &entry->context);
		if (ret) {
			pr_err("%s GID add failed device=%s port=%d index=%d\n",
			       __func__, attr->device->name, attr->port_num,
			       attr->index);
			goto add_err;
		}
	}
	dev_hold(attr->ndev);

add_err:
	if (!ret)
		pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
			 attr->device->name, attr->port_num, ix, gid->raw);
	return ret;
}
/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table: GID table in which the GID is to be added or modified
 * @gid: GID content
 * @attr: Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports on HCAs that report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr)
{
	int ret;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(table, gid, attr);
		if (ret)
			return ret;
	} else {
		/*
		 * Some HCAs report multiple GID entries with only one
		 * valid GID, the remaining entries being the zero GID.
		 * Tolerate this behavior for the IB link layer: don't
		 * fail the call, but don't add such entries to the GID
		 * cache either.
		 */
		if (!memcmp(gid, &zgid, sizeof(*gid)))
			return 0;
	}

	lockdep_assert_held(&table->lock);
	memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr));

	write_lock_irq(&table->rwlock);
	table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);
	return 0;
}
/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev: IB device whose GID entry is to be deleted
 * @port: Port number of the IB device
 * @table: GID table of the IB device for a port
 * @ix: GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
		    struct ib_gid_table *table, int ix)
{
	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);

	if (rdma_protocol_roce(ib_dev, port))
		del_roce_gid(ib_dev, port, table, ix);
	memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid));
	memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
	table->data_vec[ix].context = NULL;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is
		 * expected to return a free entry slot that is not a
		 * duplicate. A free slot is requested and returned only
		 * if pempty is set, so look for one only in that case.
		 */
		if (pempty && empty < 0) {
			if (data->props & GID_TABLE_ENTRY_INVALID) {
				/* Found an invalid (free) entry; allocate it */
				if (data->props & GID_TABLE_ENTRY_DEFAULT) {
					if (default_gid)
						empty = curr_index;
				} else {
					empty = curr_index;
				}
			}
		}

		/*
		 * find_gid() is also used to find a valid entry during
		 * lookup, where validity must be checked. So find the
		 * empty slot first (above), then skip invalid entries
		 * when matching.
		 */
		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}
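
/*
 * A minimal lookup sketch (illustrative, not driver code): the mask
 * bits select which fields of @val must match, so a GID+type+netdev
 * lookup composes three GID_ATTR_FIND_MASK_* flags, exactly as
 * ib_cache_gid_del() does below. The caller must hold table->rwlock
 * for reading (or table->lock).
 */
static inline int example_find_gid(struct ib_gid_table *table,
				   const union ib_gid *gid,
				   const struct ib_gid_attr *val)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE |
			     GID_ATTR_FIND_MASK_NETDEV;

	return find_gid(table, gid, val, false, mask, NULL);
}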
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding zero GID in support of
	 * IB spec version 1.3 section 4.1.1 point (6) and
	 * section 12.7.10 and section 12.7.20
	 */
	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	ret = add_modify_gid(table, gid, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct net_device *idev;
	unsigned long mask;
	int ret;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mask = GID_ATTR_FIND_MASK_GID |
	       GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_NETDEV;

	ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
	return ret;
}
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV,
		      NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (table->data_vec[ix].attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}
/**
 * ib_find_cached_gid_by_port - Returns the GID table index where a specified
 *   GID value occurs. It searches for the specified GID value in the local
 *   software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
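
/*
 * Illustrative usage sketch (not driver code): look up a RoCE v2 GID on
 * port 1, scoped to a known netdev. "dev", "gid" and "ndev" are assumed
 * to be valid; on success, "index" holds the GID table slot.
 */
static inline int example_find_gid_by_port(struct ib_device *dev,
					   const union ib_gid *gid,
					   struct net_device *ndev)
{
	u16 index;
	int ret;

	ret = ib_find_cached_gid_by_port(dev, gid, IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, ndev, &index);
	return ret ? ret : index;
}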
/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 *   GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned;
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque pointer passed to the filter function.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!rdma_is_port_valid(ib_dev, port) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context)) {
			found = true;
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;
	return 0;
}
static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
	int i;

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	/* Mark all entries as invalid so that the allocator can pick
	 * one of the invalid (free) entries.
	 */
	for (i = 0; i < sz; i++)
		table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID;
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid))) {
			del_gid(ib_dev, port, table, i);
			deleted = true;
		}
	}
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_table *table;
	unsigned int gid_type;
	unsigned long mask;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			mask = GID_ATTR_FIND_MASK_GID_TYPE |
			       GID_ATTR_FIND_MASK_DEFAULT;
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
		}
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table *table;
	int err = 0;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table);
		if (err)
			goto rollback_table_setup;
		ib_dev->cache.ports[port].gid = table;
	}

	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
		release_gid_table(table);
	}

	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		release_gid_table(table);
		ib_dev->cache.ports[port].gid = NULL;
	}
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}
int ib_get_cached_gid(struct ib_device *device,
		      u8 port_num,
		      int index,
		      union ib_gid *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
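
/*
 * Illustrative usage sketch (not driver code): read a cached GID and
 * its attributes. __ib_cache_gid_get() takes a reference on attr.ndev
 * via dev_hold(), so a caller that requests attributes must drop that
 * reference when done.
 */
static inline int example_get_cached_gid(struct ib_device *device)
{
	union ib_gid gid;
	struct ib_gid_attr attr;
	int ret;

	ret = ib_get_cached_gid(device, 1, 0, &gid, &attr);
	if (!ret && attr.ndev)
		dev_put(attr.ndev);	/* balance the cache's dev_hold() */
	return ret;
}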
/**
 * ib_find_cached_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_find_cached_gid() searches for the specified GID value in
 * the local software cache.
 */
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8 *port_num,
		       u16 *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_protocol_roce(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
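
/*
 * Illustrative filter sketch (not driver code): match only entries
 * whose netdev equals the one passed through @context. The filter runs
 * in atomic context under the table's read lock, so it must not sleep.
 */
static inline bool example_gid_ndev_filter(const union ib_gid *gid,
					   const struct ib_gid_attr *attr,
					   void *context)
{
	return attr->ndev == (struct net_device *)context;
}

/* Hypothetical call site:
 *	ret = ib_find_gid_by_filter(device, gid, port,
 *				    example_gid_ndev_filter, ndev, &index);
 */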
int ib_get_cached_pkey(struct ib_device *device,
		       u8 port_num,
		       int index,
		       u16 *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8 port_num,
				u64 *sn_pfx)
{
	unsigned long flags;
	int p;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	p = port_num - rdma_start_port(device);
	read_lock_irqsave(&device->cache.lock, flags);
	*sn_pfx = device->cache.ports[p].subnet_prefix;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
int ib_find_cached_pkey(struct ib_device *device,
			u8 port_num,
			u16 pkey,
			u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			/* The top bit distinguishes full from limited
			 * membership; prefer a full-member match.
			 */
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	/* Fall back to a limited-member match if that is all we found */
	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
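
/*
 * Illustrative usage sketch (not driver code): locate the default PKey
 * (0xffff). Because ib_find_cached_pkey() masks off the membership bit
 * when comparing, passing either 0x7fff or 0xffff finds the same slot,
 * with full-member entries preferred.
 */
static inline int example_find_default_pkey(struct ib_device *device,
					    u8 port_num)
{
	u16 index;
	int ret;

	ret = ib_find_cached_pkey(device, port_num, 0xffff, &index);
	return ret ? ret : index;
}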
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8 port_num,
			      u16 pkey,
			      u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8 port_num,
		      u8 *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device,
			     u8 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);
static int config_non_roce_gid_cache(struct ib_device *device,
				     u8 port, int gid_tbl_len)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	union ib_gid gid;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = device->cache.ports[port - rdma_start_port(device)].gid;

	mutex_lock(&table->lock);
	for (i = 0; i < gid_tbl_len; ++i) {
		if (!device->query_gid)
			continue;
		ret = device->query_gid(device, port, i, &gid);
		if (ret) {
			pr_warn("query_gid failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
		gid_attr.index = i;
		add_modify_gid(table, &gid, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}
static void ib_cache_update(struct ib_device *device,
			    u8 port,
			    bool enforce_security)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	int i;
	int ret;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port))
		return;

	table = device->cache.ports[port - rdma_start_port(device)].gid;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	if (!rdma_protocol_roce(device, port)) {
		ret = config_non_roce_gid_cache(device, port,
						tprops->gid_tbl_len);
		if (ret)
			goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
					     rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
		tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device,
			work->port_num,
			work->enforce_security);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			if (event->event == IB_EVENT_PKEY_CHANGE ||
			    event->event == IB_EVENT_GID_CHANGE)
				work->enforce_security = true;
			else
				work->enforce_security = false;

			queue_work(ib_wq, &work->work);
		}
	}
}
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.ports =
		kzalloc(sizeof(*device->cache.ports) *
			(rdma_end_port(device) - rdma_start_port(device) + 1),
			GFP_KERNEL);
	if (!device->cache.ports)
		return -ENOMEM;

	err = gid_table_setup_one(device);
	if (err) {
		kfree(device->cache.ports);
		device->cache.ports = NULL;
		return err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device), true);

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	ib_register_event_handler(&device->cache.event_handler);
	return 0;
}
void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache can no
	 * longer be accessed.
	 */
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		kfree(device->cache.ports[p].pkey);

	gid_table_release_one(device);
	kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}