uverbs_ioctl.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750
  1. /*
  2. * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <rdma/rdma_user_ioctl.h>
  33. #include <rdma/uverbs_ioctl.h>
  34. #include "rdma_core.h"
  35. #include "uverbs.h"
/*
 * Link in the chain of overflow memory blocks allocated on behalf of a
 * bundle by _uverbs_alloc(); the whole chain is kvfree'd at once in
 * bundle_destroy().
 */
struct bundle_alloc_head {
	struct bundle_alloc_head *next;
	u8 data[];
};
/*
 * Per-call state for one ioctl method invocation. Lives either on the
 * stack (when method_elm->use_stack) or in a single kmalloc sized by
 * uapi_compute_bundle_size().
 */
struct bundle_priv {
	/* Must be first */
	struct bundle_alloc_head alloc_head;
	/* Head of the chain of overflow allocations from _uverbs_alloc() */
	struct bundle_alloc_head *allocated_mem;
	/* Bytes available in / consumed from internal_buffer */
	size_t internal_avail;
	size_t internal_used;

	struct radix_tree_root *radix;
	const struct uverbs_api_ioctl_method *method_elm;
	/* Cached radix chunk for the fast path in uapi_get_attr_for_method() */
	void __rcu **radix_slots;
	unsigned long radix_slots_len;
	u32 method_key;

	/* Userspace's attribute array, used to write results back */
	struct ib_uverbs_attr __user *user_attrs;
	/* Kernel copy of the attribute array */
	struct ib_uverbs_attr *uattrs;

	/* Attrs whose uobject / spec state bundle_destroy() must finalize */
	DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
	DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);

	/*
	 * Must be last. bundle ends in a flex array which overlaps
	 * internal_buffer.
	 */
	struct uverbs_attr_bundle bundle;
	u64 internal_buffer[32];
};
  62. /*
  63. * Each method has an absolute minimum amount of memory it needs to allocate,
  64. * precompute that amount and determine if the onstack memory can be used or
  65. * if allocation is need.
  66. */
  67. void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
  68. unsigned int num_attrs)
  69. {
  70. struct bundle_priv *pbundle;
  71. size_t bundle_size =
  72. offsetof(struct bundle_priv, internal_buffer) +
  73. sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
  74. sizeof(*pbundle->uattrs) * num_attrs;
  75. method_elm->use_stack = bundle_size <= sizeof(*pbundle);
  76. method_elm->bundle_size =
  77. ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer));
  78. /* Do not want order-2 allocations for this. */
  79. WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE);
  80. }
/**
 * uverbs_alloc() - Quickly allocate memory for use with a bundle
 * @bundle: The bundle
 * @size: Number of bytes to allocate
 * @flags: Allocator flags
 *
 * The bundle allocator is intended for allocations that are connected with
 * processing the system call related to the bundle. The allocated memory is
 * always freed once the system call completes, and cannot be freed any other
 * way.
 *
 * This tries to use a small pool of pre-allocated memory for performance.
 *
 * Return: the allocation, or an ERR_PTR on failure.
 */
__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
			     gfp_t flags)
{
	struct bundle_priv *pbundle =
		container_of(bundle, struct bundle_priv, bundle);
	size_t new_used;
	void *res;

	if (check_add_overflow(size, pbundle->internal_used, &new_used))
		return ERR_PTR(-EOVERFLOW);

	if (new_used > pbundle->internal_avail) {
		/*
		 * Internal pool exhausted: fall back to kvmalloc and chain
		 * the block so bundle_destroy() frees it.
		 */
		struct bundle_alloc_head *buf;

		buf = kvmalloc(struct_size(buf, data, size), flags);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		buf->next = pbundle->allocated_mem;
		pbundle->allocated_mem = buf;
		return buf->data;
	}

	/* Carve from the pool; keep internal_used u64 aligned for the next caller */
	res = (void *)pbundle->internal_buffer + pbundle->internal_used;
	pbundle->internal_used =
		ALIGN(new_used, sizeof(*pbundle->internal_buffer));
	/* Pool memory is not pre-zeroed, honor __GFP_ZERO by hand */
	if (flags & __GFP_ZERO)
		memset(res, 0, size);
	return res;
}
EXPORT_SYMBOL(_uverbs_alloc);
  120. static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
  121. u16 len)
  122. {
  123. if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
  124. return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
  125. uattr->len - len);
  126. return !memchr_inv((const void *)&uattr->data + len,
  127. 0, uattr->len - len);
  128. }
/*
 * Resolve a UVERBS_ATTR_TYPE_IDRS_ARRAY attribute: copy the array of u32
 * idr values from the uattr and replace each one with a pinned uobject
 * pointer. On partial failure attr->len records how many objects were
 * obtained so bundle_destroy() can release exactly those.
 */
static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
				     const struct uverbs_api_attr *attr_uapi,
				     struct uverbs_objs_arr_attr *attr,
				     struct ib_uverbs_attr *uattr,
				     u32 attr_bkey)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	size_t array_len;
	u32 *idr_vals;
	int ret = 0;
	size_t i;

	if (uattr->attr_data.reserved)
		return -EINVAL;

	/* The payload must be a whole number of 4-byte idr values */
	if (uattr->len % sizeof(u32))
		return -EINVAL;

	array_len = uattr->len / sizeof(u32);
	if (array_len < spec->u2.objs_arr.min_len ||
	    array_len > spec->u2.objs_arr.max_len)
		return -EINVAL;

	attr->uobjects =
		uverbs_alloc(&pbundle->bundle,
			     array_size(array_len, sizeof(*attr->uobjects)));
	if (IS_ERR(attr->uobjects))
		return PTR_ERR(attr->uobjects);

	/*
	 * Since idr is 4B and *uobjects is >= 4B, we can use attr->uobjects
	 * to store idrs array and avoid additional memory allocation. The
	 * idrs array is offset to the end of the uobjects array so we will be
	 * able to read idr and replace with a pointer.
	 */
	idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;

	/* Short arrays ride inline in uattr->data, longer ones in user memory */
	if (uattr->len > sizeof(uattr->data)) {
		ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
				     uattr->len);
		if (ret)
			return -EFAULT;
	} else {
		memcpy(idr_vals, &uattr->data, uattr->len);
	}

	for (i = 0; i != array_len; i++) {
		/* Overwrites the idr value at [i] with the uobject pointer */
		attr->uobjects[i] = uverbs_get_uobject_from_file(
			spec->u2.objs_arr.obj_type, pbundle->bundle.ufile,
			spec->u2.objs_arr.access, idr_vals[i]);
		if (IS_ERR(attr->uobjects[i])) {
			ret = PTR_ERR(attr->uobjects[i]);
			break;
		}
	}

	/* Only the first i objects were acquired and need finalizing */
	attr->len = i;
	__set_bit(attr_bkey, pbundle->spec_finalize);
	return ret;
}
  181. static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
  182. struct uverbs_objs_arr_attr *attr,
  183. bool commit)
  184. {
  185. const struct uverbs_attr_spec *spec = &attr_uapi->spec;
  186. int current_ret;
  187. int ret = 0;
  188. size_t i;
  189. for (i = 0; i != attr->len; i++) {
  190. current_ret = uverbs_finalize_object(
  191. attr->uobjects[i], spec->u2.objs_arr.access, commit);
  192. if (!ret)
  193. ret = current_ret;
  194. }
  195. return ret;
  196. }
/*
 * Validate one attribute from userspace and record its value in the
 * bundle's attrs[] slot at attr_bkey. Depending on the spec type this
 * copies pointer payloads into bundle storage or pins IDR/FD uobjects.
 */
static int uverbs_process_attr(struct bundle_priv *pbundle,
			       const struct uverbs_api_attr *attr_uapi,
			       struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
	const struct uverbs_attr_spec *spec = &attr_uapi->spec;
	struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
	const struct uverbs_attr_spec *val_spec = spec;
	struct uverbs_obj_attr *o_attr;

	switch (spec->type) {
	case UVERBS_ATTR_TYPE_ENUM_IN:
		if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems)
			return -EOPNOTSUPP;

		if (uattr->attr_data.enum_data.reserved)
			return -EINVAL;

		/* Validate the payload against the selected variant's spec */
		val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id];

		/* Currently we only support PTR_IN based enums */
		if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN)
			return -EOPNOTSUPP;

		e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id;
	/* fall through */
	case UVERBS_ATTR_TYPE_PTR_IN:
		/* Ensure that any data provided by userspace beyond the known
		 * struct is zero. Userspace that knows how to use some future
		 * longer struct will fail here if used with an old kernel and
		 * non-zero content, making ABI compat/discovery simpler.
		 */
		if (uattr->len > val_spec->u.ptr.len &&
		    val_spec->zero_trailing &&
		    !uverbs_is_attr_cleared(uattr, val_spec->u.ptr.len))
			return -EOPNOTSUPP;

	/* fall through */
	case UVERBS_ATTR_TYPE_PTR_OUT:
		if (uattr->len < val_spec->u.ptr.min_len ||
		    (!val_spec->zero_trailing &&
		     uattr->len > val_spec->u.ptr.len))
			return -EINVAL;

		/* ENUM_IN already validated its own reserved field above */
		if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN &&
		    uattr->attr_data.reserved)
			return -EINVAL;

		e->ptr_attr.uattr_idx = uattr - pbundle->uattrs;
		e->ptr_attr.len = uattr->len;

		if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
			/* Copy the payload into bundle-lifetime storage now */
			void *p;

			p = uverbs_alloc(&pbundle->bundle, uattr->len);
			if (IS_ERR(p))
				return PTR_ERR(p);

			e->ptr_attr.ptr = p;

			if (copy_from_user(p, u64_to_user_ptr(uattr->data),
					   uattr->len))
				return -EFAULT;
		} else {
			/* Inline data or a raw user pointer, kept as-is */
			e->ptr_attr.data = uattr->data;
		}
		break;

	case UVERBS_ATTR_TYPE_IDR:
	case UVERBS_ATTR_TYPE_FD:
		if (uattr->attr_data.reserved)
			return -EINVAL;

		if (uattr->len != 0)
			return -EINVAL;

		o_attr = &e->obj_attr;
		o_attr->attr_elm = attr_uapi;

		/*
		 * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and
		 * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64
		 * here without caring about truncation as we know that the
		 * IDR implementation today rejects negative IDs
		 */
		o_attr->uobject = uverbs_get_uobject_from_file(
					spec->u.obj.obj_type,
					pbundle->bundle.ufile,
					spec->u.obj.access,
					uattr->data_s64);
		if (IS_ERR(o_attr->uobject))
			return PTR_ERR(o_attr->uobject);
		/* Mark for commit/abort processing in bundle_destroy() */
		__set_bit(attr_bkey, pbundle->uobj_finalize);

		if (spec->u.obj.access == UVERBS_ACCESS_NEW) {
			unsigned int uattr_idx = uattr - pbundle->uattrs;
			s64 id = o_attr->uobject->id;

			/* Copy the allocated id to the user-space */
			if (put_user(id, &pbundle->user_attrs[uattr_idx].data))
				return -EFAULT;
		}
		break;

	case UVERBS_ATTR_TYPE_IDRS_ARRAY:
		return uverbs_process_idrs_array(pbundle, attr_uapi,
						 &e->objs_arr_attr, uattr,
						 attr_bkey);

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/*
 * We search the radix tree with the method prefix and now we want to fast
 * search the suffix bits to get a particular attribute pointer. It is not
 * totally clear to me if this breaks the radix tree encasulation or not, but
 * it uses the iter data to determine if the method iter points at the same
 * chunk that will store the attribute, if so it just derefs it directly. By
 * construction in most kernel configs the method and attrs will all fit in a
 * single radix chunk, so in most cases this will have no search. Other cases
 * this falls back to a full search.
 */
static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle,
					     u32 attr_key)
{
	void __rcu **slot;

	if (likely(attr_key < pbundle->radix_slots_len)) {
		void *entry;

		/* Fast path: the attr lives in the cached method chunk */
		slot = pbundle->radix_slots + attr_key;
		entry = rcu_dereference_raw(*slot);
		if (likely(!radix_tree_is_internal_node(entry) && entry))
			return slot;
	}

	/* Slow path: full lookup; NULL if the attr is unknown */
	return radix_tree_lookup_slot(pbundle->radix,
				      pbundle->method_key | attr_key);
}
/*
 * Look up the kernel spec for one userspace attribute and process it into
 * the bundle. Unknown attributes are silently ignored unless userspace
 * flagged them mandatory.
 */
static int uverbs_set_attr(struct bundle_priv *pbundle,
			   struct ib_uverbs_attr *uattr)
{
	u32 attr_key = uapi_key_attr(uattr->attr_id);
	u32 attr_bkey = uapi_bkey_attr(attr_key);
	const struct uverbs_api_attr *attr;
	void __rcu **slot;
	int ret;

	slot = uapi_get_attr_for_method(pbundle, attr_key);
	if (!slot) {
		/*
		 * Kernel does not support the attribute but user-space says it
		 * is mandatory
		 */
		if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
			return -EPROTONOSUPPORT;
		return 0;
	}
	attr = srcu_dereference(
		*slot, &pbundle->bundle.ufile->device->disassociate_srcu);

	/* Reject duplicate attributes from user-space */
	if (test_bit(attr_bkey, pbundle->bundle.attr_present))
		return -EINVAL;

	ret = uverbs_process_attr(pbundle, attr, uattr, attr_bkey);
	if (ret)
		return ret;

	__set_bit(attr_bkey, pbundle->bundle.attr_present);
	return 0;
}
/*
 * Copy in and validate all attributes, then invoke the method handler.
 * Methods with a destroy_bkey first destroy that uobject and only call the
 * handler once destruction succeeded.
 */
static int ib_uverbs_run_method(struct bundle_priv *pbundle,
				unsigned int num_attrs)
{
	int (*handler)(struct ib_uverbs_file *ufile,
		       struct uverbs_attr_bundle *ctx);
	size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
	unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
	unsigned int i;
	int ret;

	/* See uverbs_disassociate_api() */
	handler = srcu_dereference(
		pbundle->method_elm->handler,
		&pbundle->bundle.ufile->device->disassociate_srcu);
	if (!handler)
		return -EIO;

	pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
	if (IS_ERR(pbundle->uattrs))
		return PTR_ERR(pbundle->uattrs);
	if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
		return -EFAULT;

	for (i = 0; i != num_attrs; i++) {
		ret = uverbs_set_attr(pbundle, &pbundle->uattrs[i]);
		if (unlikely(ret))
			return ret;
	}

	/* User space did not provide all the mandatory attributes */
	if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory,
				    pbundle->bundle.attr_present,
				    pbundle->method_elm->key_bitmap_len)))
		return -EINVAL;

	if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
		struct uverbs_obj_attr *destroy_attr =
			&pbundle->bundle.attrs[destroy_bkey].obj_attr;

		ret = uobj_destroy(destroy_attr->uobject);
		if (ret)
			return ret;
		/* Destroyed here, so bundle_destroy() must not finalize it */
		__clear_bit(destroy_bkey, pbundle->uobj_finalize);

		ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
		uobj_put_destroy(destroy_attr->uobject);
	} else {
		ret = handler(pbundle->bundle.ufile, &pbundle->bundle);
	}

	/*
	 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
	 * not invoke the method because the request is not supported.  No
	 * other cases should return this code.
	 */
	if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT))
		return -EINVAL;

	return ret;
}
/*
 * Tear down a bundle: commit or abort every pinned uobject, finalize
 * IDRS_ARRAY attributes, then free all chained allocations. Note that for
 * heap-allocated bundles pbundle->allocated_mem includes pbundle itself
 * (via alloc_head), so the kvfree loop frees the bundle too. Returns the
 * first finalization error encountered.
 */
static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
	unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
	struct bundle_alloc_head *memblock;
	unsigned int i;
	int ret = 0;

	/* fast path for simple uobjects */
	i = -1;
	while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
				  i + 1)) < key_bitmap_len) {
		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
		int current_ret;

		current_ret = uverbs_finalize_object(
			attr->obj_attr.uobject,
			attr->obj_attr.attr_elm->spec.u.obj.access, commit);
		if (!ret)
			ret = current_ret;
	}

	/* IDRS_ARRAY attrs need their spec re-looked-up to finalize */
	i = -1;
	while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
				  i + 1)) < key_bitmap_len) {
		struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
		const struct uverbs_api_attr *attr_uapi;
		void __rcu **slot;
		int current_ret;

		slot = uapi_get_attr_for_method(
			pbundle,
			pbundle->method_key | uapi_bkey_to_key_attr(i));
		if (WARN_ON(!slot))
			continue;

		attr_uapi = srcu_dereference(
			*slot,
			&pbundle->bundle.ufile->device->disassociate_srcu);

		if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
			current_ret = uverbs_free_idrs_array(
				attr_uapi, &attr->objs_arr_attr, commit);
			if (!ret)
				ret = current_ret;
		}
	}

	/* Free the overflow chain (and pbundle itself in the heap case) */
	for (memblock = pbundle->allocated_mem; memblock;) {
		struct bundle_alloc_head *tmp = memblock;

		memblock = memblock->next;
		kvfree(tmp);
	}

	return ret;
}
/*
 * Dispatch one ioctl method: locate the method in the uapi radix tree, set
 * up a bundle (on-stack or heap per the precomputed use_stack), run the
 * method and destroy the bundle. Must be called inside the device's
 * disassociate SRCU read section.
 */
static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
			       struct ib_uverbs_ioctl_hdr *hdr,
			       struct ib_uverbs_attr __user *user_attrs)
{
	const struct uverbs_api_ioctl_method *method_elm;
	struct uverbs_api *uapi = ufile->device->uapi;
	struct radix_tree_iter attrs_iter;
	struct bundle_priv *pbundle;
	struct bundle_priv onstack;
	void __rcu **slot;
	int destroy_ret;
	int ret;

	if (unlikely(hdr->driver_id != uapi->driver_id))
		return -EINVAL;

	slot = radix_tree_iter_lookup(
		&uapi->radix, &attrs_iter,
		uapi_key_obj(hdr->object_id) |
			uapi_key_ioctl_method(hdr->method_id));
	if (unlikely(!slot))
		return -EPROTONOSUPPORT;
	method_elm = srcu_dereference(*slot, &ufile->device->disassociate_srcu);

	if (!method_elm->use_stack) {
		pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
		if (!pbundle)
			return -ENOMEM;
		pbundle->internal_avail =
			method_elm->bundle_size -
			offsetof(struct bundle_priv, internal_buffer);
		/*
		 * Chaining alloc_head makes bundle_destroy()'s kvfree loop
		 * free pbundle itself.
		 */
		pbundle->alloc_head.next = NULL;
		pbundle->allocated_mem = &pbundle->alloc_head;
	} else {
		pbundle = &onstack;
		pbundle->internal_avail = sizeof(pbundle->internal_buffer);
		pbundle->allocated_mem = NULL;
	}

	/* Space for the pbundle->bundle.attrs flex array */
	pbundle->method_elm = method_elm;
	pbundle->method_key = attrs_iter.index;
	pbundle->bundle.ufile = ufile;
	pbundle->radix = &uapi->radix;
	pbundle->radix_slots = slot;
	pbundle->radix_slots_len = radix_tree_chunk_size(&attrs_iter);
	pbundle->user_attrs = user_attrs;

	/* Reserve the attrs flex array out of internal_buffer up front */
	pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
				       sizeof(*pbundle->bundle.attrs),
				       sizeof(*pbundle->internal_buffer));
	memset(pbundle->bundle.attr_present, 0,
	       sizeof(pbundle->bundle.attr_present));
	memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
	memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));

	ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
	/* Commit uobjects only on success */
	destroy_ret = bundle_destroy(pbundle, ret == 0);
	if (unlikely(destroy_ret && !ret))
		return destroy_ret;

	return ret;
}
  497. long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  498. {
  499. struct ib_uverbs_file *file = filp->private_data;
  500. struct ib_uverbs_ioctl_hdr __user *user_hdr =
  501. (struct ib_uverbs_ioctl_hdr __user *)arg;
  502. struct ib_uverbs_ioctl_hdr hdr;
  503. int srcu_key;
  504. int err;
  505. if (unlikely(cmd != RDMA_VERBS_IOCTL))
  506. return -ENOIOCTLCMD;
  507. err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
  508. if (err)
  509. return -EFAULT;
  510. if (hdr.length > PAGE_SIZE ||
  511. hdr.length != struct_size(&hdr, attrs, hdr.num_attrs))
  512. return -EINVAL;
  513. if (hdr.reserved1 || hdr.reserved2)
  514. return -EPROTONOSUPPORT;
  515. srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
  516. err = ib_uverbs_cmd_verbs(file, &hdr, user_hdr->attrs);
  517. srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
  518. return err;
  519. }
  520. int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
  521. size_t idx, u64 allowed_bits)
  522. {
  523. const struct uverbs_attr *attr;
  524. u64 flags;
  525. attr = uverbs_attr_get(attrs_bundle, idx);
  526. /* Missing attribute means 0 flags */
  527. if (IS_ERR(attr)) {
  528. *to = 0;
  529. return 0;
  530. }
  531. /*
  532. * New userspace code should use 8 bytes to pass flags, but we
  533. * transparently support old userspaces that were using 4 bytes as
  534. * well.
  535. */
  536. if (attr->ptr_attr.len == 8)
  537. flags = attr->ptr_attr.data;
  538. else if (attr->ptr_attr.len == 4)
  539. flags = *(u32 *)&attr->ptr_attr.data;
  540. else
  541. return -EINVAL;
  542. if (flags & ~allowed_bits)
  543. return -EINVAL;
  544. *to = flags;
  545. return 0;
  546. }
  547. EXPORT_SYMBOL(uverbs_get_flags64);
  548. int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
  549. size_t idx, u64 allowed_bits)
  550. {
  551. u64 flags;
  552. int ret;
  553. ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits);
  554. if (ret)
  555. return ret;
  556. if (flags > U32_MAX)
  557. return -EINVAL;
  558. *to = flags;
  559. return 0;
  560. }
  561. EXPORT_SYMBOL(uverbs_get_flags32);
  562. /*
  563. * This is for ease of conversion. The purpose is to convert all drivers to
  564. * use uverbs_attr_bundle instead of ib_udata. Assume attr == 0 is input and
  565. * attr == 1 is output.
  566. */
  567. void create_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata)
  568. {
  569. struct bundle_priv *pbundle =
  570. container_of(bundle, struct bundle_priv, bundle);
  571. const struct uverbs_attr *uhw_in =
  572. uverbs_attr_get(bundle, UVERBS_ATTR_UHW_IN);
  573. const struct uverbs_attr *uhw_out =
  574. uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
  575. if (!IS_ERR(uhw_in)) {
  576. udata->inlen = uhw_in->ptr_attr.len;
  577. if (uverbs_attr_ptr_is_inline(uhw_in))
  578. udata->inbuf =
  579. &pbundle->user_attrs[uhw_in->ptr_attr.uattr_idx]
  580. .data;
  581. else
  582. udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
  583. } else {
  584. udata->inbuf = NULL;
  585. udata->inlen = 0;
  586. }
  587. if (!IS_ERR(uhw_out)) {
  588. udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
  589. udata->outlen = uhw_out->ptr_attr.len;
  590. } else {
  591. udata->outbuf = NULL;
  592. udata->outlen = 0;
  593. }
  594. }
  595. int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
  596. const void *from, size_t size)
  597. {
  598. struct bundle_priv *pbundle =
  599. container_of(bundle, struct bundle_priv, bundle);
  600. const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
  601. u16 flags;
  602. size_t min_size;
  603. if (IS_ERR(attr))
  604. return PTR_ERR(attr);
  605. min_size = min_t(size_t, attr->ptr_attr.len, size);
  606. if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
  607. return -EFAULT;
  608. flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
  609. UVERBS_ATTR_F_VALID_OUTPUT;
  610. if (put_user(flags,
  611. &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
  612. return -EFAULT;
  613. return 0;
  614. }
  615. EXPORT_SYMBOL(uverbs_copy_to);
  616. int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
  617. size_t idx, s64 lower_bound, u64 upper_bound,
  618. s64 *def_val)
  619. {
  620. const struct uverbs_attr *attr;
  621. attr = uverbs_attr_get(attrs_bundle, idx);
  622. if (IS_ERR(attr)) {
  623. if ((PTR_ERR(attr) != -ENOENT) || !def_val)
  624. return PTR_ERR(attr);
  625. *to = *def_val;
  626. } else {
  627. *to = attr->ptr_attr.data;
  628. }
  629. if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
  630. return -EINVAL;
  631. return 0;
  632. }
  633. EXPORT_SYMBOL(_uverbs_get_const);