cache.c

/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int table_len;
        u16 table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device *device;
        u8 port_num;
        bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID      = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV   = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT  = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE = 1UL << 3,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
        unsigned long props;
        union ib_gid gid;
        struct ib_gid_attr attr;
        void *context;
};

struct ib_gid_table {
        int sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find if this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Delete requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         **/
        struct mutex lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t rwlock;
        struct ib_gid_table_entry *data_vec;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device = ib_dev;
                event.element.port_num = port;
                event.event = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}

static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]             = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;
        enum ib_gid_type old_gid_type;

        /* In the rdma_cap_roce_gid_table case, this function should be
         * protected by a sleep-able lock.
         */
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }

        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (default_gid) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
                if (action == GID_TABLE_WRITE_ACTION_DEL)
                        table->data_vec[ix].attr.gid_type = old_gid_type;
        }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}

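/* Convenience wrappers around write_gid() for the three table write
 * actions (add, modify and delete).
 */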
static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}

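/* Build the link-local default RoCE GID for @dev: the fe80::/64 subnet
 * prefix followed by an EUI-64 interface ID derived from the netdev's
 * MAC address.
 */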
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

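/* Add @gid/@attr to the port's GID table (and to the RoCE provider, if
 * any). Succeeds silently if an identical entry already exists; returns
 * -ENOSPC when the table is full and -EPERM for default GIDs.
 */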
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;
        int empty;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}

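/* Remove the entry matching @gid/@attr from the port's GID table and
 * dispatch an IB_EVENT_GID_CHANGE event when a deletion took place.
 */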
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;
        int ix;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix,
                                     !!(table->data_vec[ix].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}

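/* Copy the cached entry at @index into @gid and, optionally, @attr.
 * The caller must hold the table's rwlock; a reference is taken on
 * attr->ndev before returning.
 */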
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ib_dev->cache.ports[p].gid;
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return -ENOENT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data passed through to @filter.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
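/* An illustrative filter (hypothetical, not part of the original file):
 * match entries whose netdev equals the context pointer.
 *
 *      static bool filter_by_ndev(const union ib_gid *gid,
 *                                 const struct ib_gid_attr *attr,
 *                                 void *context)
 *      {
 *              return attr->ndev == context;
 *      }
 */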
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!rdma_is_port_valid(ib_dev, port) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        continue;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context)) {
                        found = true;
                        if (index)
                                *index = i;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;
        return 0;
}

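/* Allocate a GID table with @sz zero-initialized entries and initialize
 * its locks.
 */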
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        if (!del_gid(ib_dev, port, table, i,
                                     !!(table->data_vec[i].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find the default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        } else {
                                dispatch_gid_change_event(ib_dev, port);
                        }
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry = &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}

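/* Allocate and initialize a GID table for each physical port, reserving
 * the first entries for default GIDs. On failure, every table allocated
 * so far is cleaned up and released.
 */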
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table *table;
        int err = 0;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table = alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table);
                if (err)
                        goto rollback_table_setup;
                ib_dev->cache.ports[port].gid = table;
        }

        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
                release_gid_table(table);
        }

        return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                release_gid_table(table);
                ib_dev->cache.ports[port].gid = NULL;
        }
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
        }
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        rdma_roce_rescan_device(ib_dev);

        return err;
}

int ib_get_cached_gid(struct ib_device *device,
                      u8 port_num,
                      int index,
                      union ib_gid *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table *table;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = device->cache.ports[port_num - rdma_start_port(device)].gid;
        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8 *port_num,
                       u16 *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only RoCE GID table supports filter function */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}

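/* P_Key cache accessors. The P_Key table is cached as a flat per-port
 * array and read under device->cache.lock.
 */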
int ib_get_cached_pkey(struct ib_device *device,
                       u8 port_num,
                       int index,
                       u16 *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
                                u8 port_num,
                                u64 *sn_pfx)
{
        unsigned long flags;
        int p;

        if (port_num < rdma_start_port(device) ||
            port_num > rdma_end_port(device))
                return -EINVAL;

        p = port_num - rdma_start_port(device);
        read_lock_irqsave(&device->cache.lock, flags);
        *sn_pfx = device->cache.ports[p].subnet_prefix;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

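/* Find @pkey in the port's P_Key cache. A full-member P_Key (bit 15
 * set) is preferred; a partial-member match is returned only when no
 * full-member P_Key with the same base value exists.
 */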
int ib_find_cached_pkey(struct ib_device *device,
                        u8 port_num,
                        u16 pkey,
                        u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8 port_num,
                              u16 pkey,
                              u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8 port_num,
                      u8 *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device,
                             u8 port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) ||
            port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->cache.ports[port_num
                - rdma_start_port(device)].port_state;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

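/* Re-read the port's attributes, P_Key table and (for IB ports) GID
 * table from the hardware, then swap the fresh copies into the cache
 * under device->cache.lock. Ports with a provider-managed RoCE GID
 * table only have their P_Key table and port attributes refreshed.
 */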
static void ib_cache_update(struct ib_device *device,
                            u8 port,
                            bool enforce_security)
{
        struct ib_port_attr *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int table_len;
                union ib_gid table[0];
        } *gid_cache = NULL;
        int i;
        int ret;
        struct ib_gid_table *table;
        bool use_roce_gid_table =
                        rdma_cap_roce_gid_table(device, port);

        if (!rdma_is_port_valid(device, port))
                return;

        table = device->cache.ports[port - rdma_start_port(device)].gid;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.ports[port -
                rdma_start_port(device)].pkey;

        device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
        device->cache.ports[port - rdma_start_port(device)].port_state =
                tprops->state;

        device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
                tprops->subnet_prefix;
        write_unlock_irq(&device->cache.lock);

        if (enforce_security)
                ib_security_cache_change(device,
                                         port,
                                         tprops->subnet_prefix);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device,
                        work->port_num,
                        work->enforce_security);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device = event->device;
                        work->port_num = event->element.port_num;
                        if (event->event == IB_EVENT_PKEY_CHANGE ||
                            event->event == IB_EVENT_GID_CHANGE)
                                work->enforce_security = true;
                        else
                                work->enforce_security = false;

                        queue_work(ib_wq, &work->work);
                }
        }
}

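/* Called at device registration: allocate the per-port cache array, set
 * up the GID tables, prime the cache for every port and register the
 * event handler that keeps the cache current.
 */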
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.ports =
                kzalloc(sizeof(*device->cache.ports) *
                        (rdma_end_port(device) - rdma_start_port(device) + 1),
                        GFP_KERNEL);
        if (!device->cache.ports)
                return -ENOMEM;

        err = gid_table_setup_one(device);
        if (err) {
                kfree(device->cache.ports);
                device->cache.ports = NULL;
                return err;
        }

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device), true);

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        ib_register_event_handler(&device->cache.event_handler);
        return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                kfree(device->cache.ports[p].pkey);

        gid_table_release_one(device);
        kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}