| 1 | /* |
| 2 | * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. |
| 3 | * |
| 4 | * This software is available to you under a choice of one of two |
| 5 | * licenses. You may choose to be licensed under the terms of the GNU |
| 6 | * General Public License (GPL) Version 2, available from the file |
| 7 | * COPYING in the main directory of this source tree, or the |
| 8 | * OpenIB.org BSD license below: |
| 9 | * |
| 10 | * Redistribution and use in source and binary forms, with or |
| 11 | * without modification, are permitted provided that the following |
| 12 | * conditions are met: |
| 13 | * |
| 14 | * - Redistributions of source code must retain the above |
| 15 | * copyright notice, this list of conditions and the following |
| 16 | * disclaimer. |
| 17 | * |
| 18 | * - Redistributions in binary form must reproduce the above |
| 19 | * copyright notice, this list of conditions and the following |
| 20 | * disclaimer in the documentation and/or other materials |
| 21 | * provided with the distribution. |
| 22 | * |
| 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 30 | * SOFTWARE. |
| 31 | */ |
| 32 | |
| 33 | #include <rdma/rdma_user_ioctl.h> |
| 34 | #include <rdma/uverbs_ioctl.h> |
| 35 | #include "rdma_core.h" |
| 36 | #include "uverbs.h" |
| 37 | |
/*
 * Chain link for overflow memory obtained by _uverbs_alloc() when the
 * bundle's internal buffer is exhausted. Blocks are singly linked through
 * 'next' and all released in bundle_destroy(); 'data' is the payload
 * handed back to the caller.
 */
struct bundle_alloc_head {
	struct_group_tagged(bundle_alloc_head_hdr, hdr,
		struct bundle_alloc_head *next;
	);
	u8 data[];
};
| 44 | |
/*
 * Per-ioctl working state for one method invocation. Lives on the stack
 * when method_elm->use_stack, otherwise in a kmalloc'd buffer sized by
 * uapi_compute_bundle_size(). Torn down by bundle_destroy().
 */
struct bundle_priv {
	/* Must be first */
	struct bundle_alloc_head_hdr alloc_head;
	/* Head of the kvmalloc'd overflow chain from _uverbs_alloc() */
	struct bundle_alloc_head *allocated_mem;
	/* Capacity of / bytes consumed from internal_buffer */
	size_t internal_avail;
	size_t internal_used;

	/* uapi radix tree plus cached chunk state for fast attr lookup */
	struct radix_tree_root *radix;
	const struct uverbs_api_ioctl_method *method_elm;
	void __rcu **radix_slots;
	unsigned long radix_slots_len;
	u32 method_key;

	/* Userspace attr array and the kernel copy made of it */
	struct ib_uverbs_attr __user *user_attrs;
	struct ib_uverbs_attr *uattrs;

	/* Per attr-bkey bitmaps of cleanup work owed at destroy time */
	DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
	DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
	DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);

	/*
	 * Must be last. bundle ends in a flex array which overlaps
	 * internal_buffer.
	 */
	struct uverbs_attr_bundle_hdr bundle;
	u64 internal_buffer[32];
};
| 72 | |
| 73 | /* |
| 74 | * Each method has an absolute minimum amount of memory it needs to allocate, |
| 75 | * precompute that amount and determine if the onstack memory can be used or |
| 76 | * if allocation is need. |
| 77 | */ |
| 78 | void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm, |
| 79 | unsigned int num_attrs) |
| 80 | { |
| 81 | struct bundle_priv *pbundle; |
| 82 | struct uverbs_attr_bundle *bundle; |
| 83 | size_t bundle_size = |
| 84 | offsetof(struct bundle_priv, internal_buffer) + |
| 85 | sizeof(*bundle->attrs) * method_elm->key_bitmap_len + |
| 86 | sizeof(*pbundle->uattrs) * num_attrs; |
| 87 | |
| 88 | method_elm->use_stack = bundle_size <= sizeof(*pbundle); |
| 89 | method_elm->bundle_size = |
| 90 | ALIGN(bundle_size + 256, sizeof(*pbundle->internal_buffer)); |
| 91 | |
| 92 | /* Do not want order-2 allocations for this. */ |
| 93 | WARN_ON_ONCE(method_elm->bundle_size > PAGE_SIZE); |
| 94 | } |
| 95 | |
| 96 | /** |
| 97 | * _uverbs_alloc() - Quickly allocate memory for use with a bundle |
| 98 | * @bundle: The bundle |
| 99 | * @size: Number of bytes to allocate |
| 100 | * @flags: Allocator flags |
| 101 | * |
| 102 | * The bundle allocator is intended for allocations that are connected with |
| 103 | * processing the system call related to the bundle. The allocated memory is |
| 104 | * always freed once the system call completes, and cannot be freed any other |
| 105 | * way. |
| 106 | * |
| 107 | * This tries to use a small pool of pre-allocated memory for performance. |
| 108 | */ |
| 109 | __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size, |
| 110 | gfp_t flags) |
| 111 | { |
| 112 | struct bundle_priv *pbundle = |
| 113 | container_of(&bundle->hdr, struct bundle_priv, bundle); |
| 114 | size_t new_used; |
| 115 | void *res; |
| 116 | |
| 117 | if (check_add_overflow(size, pbundle->internal_used, &new_used)) |
| 118 | return ERR_PTR(error: -EOVERFLOW); |
| 119 | |
| 120 | if (new_used > pbundle->internal_avail) { |
| 121 | struct bundle_alloc_head *buf; |
| 122 | |
| 123 | buf = kvmalloc(struct_size(buf, data, size), flags); |
| 124 | if (!buf) |
| 125 | return ERR_PTR(error: -ENOMEM); |
| 126 | buf->next = pbundle->allocated_mem; |
| 127 | pbundle->allocated_mem = buf; |
| 128 | return buf->data; |
| 129 | } |
| 130 | |
| 131 | res = (void *)pbundle->internal_buffer + pbundle->internal_used; |
| 132 | pbundle->internal_used = |
| 133 | ALIGN(new_used, sizeof(*pbundle->internal_buffer)); |
| 134 | if (want_init_on_alloc(flags)) |
| 135 | memset(res, 0, size); |
| 136 | return res; |
| 137 | } |
| 138 | EXPORT_SYMBOL(_uverbs_alloc); |
| 139 | |
| 140 | static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr, |
| 141 | u16 len) |
| 142 | { |
| 143 | if (uattr->len > sizeof_field(struct ib_uverbs_attr, data)) |
| 144 | return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len, |
| 145 | len: uattr->len - len); |
| 146 | |
| 147 | return !memchr_inv(p: (const void *)&uattr->data + len, |
| 148 | c: 0, size: uattr->len - len); |
| 149 | } |
| 150 | |
| 151 | static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, |
| 152 | const struct uverbs_attr *attr) |
| 153 | { |
| 154 | struct bundle_priv *pbundle = |
| 155 | container_of(&bundle->hdr, struct bundle_priv, bundle); |
| 156 | u16 flags; |
| 157 | |
| 158 | flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | |
| 159 | UVERBS_ATTR_F_VALID_OUTPUT; |
| 160 | if (put_user(flags, |
| 161 | &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) |
| 162 | return -EFAULT; |
| 163 | return 0; |
| 164 | } |
| 165 | |
| 166 | static int uverbs_process_idrs_array(struct bundle_priv *pbundle, |
| 167 | const struct uverbs_api_attr *attr_uapi, |
| 168 | struct uverbs_objs_arr_attr *attr, |
| 169 | struct ib_uverbs_attr *uattr, |
| 170 | u32 attr_bkey) |
| 171 | { |
| 172 | struct uverbs_attr_bundle *bundle = |
| 173 | container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); |
| 174 | const struct uverbs_attr_spec *spec = &attr_uapi->spec; |
| 175 | size_t array_len; |
| 176 | u32 *idr_vals; |
| 177 | int ret = 0; |
| 178 | size_t i; |
| 179 | |
| 180 | if (uattr->attr_data.reserved) |
| 181 | return -EINVAL; |
| 182 | |
| 183 | if (uattr->len % sizeof(u32)) |
| 184 | return -EINVAL; |
| 185 | |
| 186 | array_len = uattr->len / sizeof(u32); |
| 187 | if (array_len < spec->u2.objs_arr.min_len || |
| 188 | array_len > spec->u2.objs_arr.max_len) |
| 189 | return -EINVAL; |
| 190 | |
| 191 | attr->uobjects = |
| 192 | uverbs_alloc(bundle, |
| 193 | array_size(array_len, sizeof(*attr->uobjects))); |
| 194 | if (IS_ERR(ptr: attr->uobjects)) |
| 195 | return PTR_ERR(ptr: attr->uobjects); |
| 196 | |
| 197 | /* |
| 198 | * Since idr is 4B and *uobjects is >= 4B, we can use attr->uobjects |
| 199 | * to store idrs array and avoid additional memory allocation. The |
| 200 | * idrs array is offset to the end of the uobjects array so we will be |
| 201 | * able to read idr and replace with a pointer. |
| 202 | */ |
| 203 | idr_vals = (u32 *)(attr->uobjects + array_len) - array_len; |
| 204 | |
| 205 | if (uattr->len > sizeof(uattr->data)) { |
| 206 | ret = copy_from_user(to: idr_vals, u64_to_user_ptr(uattr->data), |
| 207 | n: uattr->len); |
| 208 | if (ret) |
| 209 | return -EFAULT; |
| 210 | } else { |
| 211 | memcpy(idr_vals, &uattr->data, uattr->len); |
| 212 | } |
| 213 | |
| 214 | for (i = 0; i != array_len; i++) { |
| 215 | attr->uobjects[i] = uverbs_get_uobject_from_file( |
| 216 | object_id: spec->u2.objs_arr.obj_type, access: spec->u2.objs_arr.access, |
| 217 | id: idr_vals[i], attrs: bundle); |
| 218 | if (IS_ERR(ptr: attr->uobjects[i])) { |
| 219 | ret = PTR_ERR(ptr: attr->uobjects[i]); |
| 220 | break; |
| 221 | } |
| 222 | } |
| 223 | |
| 224 | attr->len = i; |
| 225 | __set_bit(attr_bkey, pbundle->spec_finalize); |
| 226 | return ret; |
| 227 | } |
| 228 | |
| 229 | static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi, |
| 230 | struct uverbs_objs_arr_attr *attr, |
| 231 | bool commit, |
| 232 | struct uverbs_attr_bundle *attrs) |
| 233 | { |
| 234 | const struct uverbs_attr_spec *spec = &attr_uapi->spec; |
| 235 | size_t i; |
| 236 | |
| 237 | for (i = 0; i != attr->len; i++) |
| 238 | uverbs_finalize_object(uobj: attr->uobjects[i], |
| 239 | access: spec->u2.objs_arr.access, hw_obj_valid: false, commit, |
| 240 | attrs); |
| 241 | } |
| 242 | |
| 243 | static int uverbs_process_attr(struct bundle_priv *pbundle, |
| 244 | const struct uverbs_api_attr *attr_uapi, |
| 245 | struct ib_uverbs_attr *uattr, u32 attr_bkey) |
| 246 | { |
| 247 | const struct uverbs_attr_spec *spec = &attr_uapi->spec; |
| 248 | struct uverbs_attr_bundle *bundle = |
| 249 | container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); |
| 250 | struct uverbs_attr *e = &bundle->attrs[attr_bkey]; |
| 251 | const struct uverbs_attr_spec *val_spec = spec; |
| 252 | struct uverbs_obj_attr *o_attr; |
| 253 | |
| 254 | switch (spec->type) { |
| 255 | case UVERBS_ATTR_TYPE_ENUM_IN: |
| 256 | if (uattr->attr_data.enum_data.elem_id >= spec->u.enum_def.num_elems) |
| 257 | return -EOPNOTSUPP; |
| 258 | |
| 259 | if (uattr->attr_data.enum_data.reserved) |
| 260 | return -EINVAL; |
| 261 | |
| 262 | val_spec = &spec->u2.enum_def.ids[uattr->attr_data.enum_data.elem_id]; |
| 263 | |
| 264 | /* Currently we only support PTR_IN based enums */ |
| 265 | if (val_spec->type != UVERBS_ATTR_TYPE_PTR_IN) |
| 266 | return -EOPNOTSUPP; |
| 267 | |
| 268 | e->ptr_attr.enum_id = uattr->attr_data.enum_data.elem_id; |
| 269 | fallthrough; |
| 270 | case UVERBS_ATTR_TYPE_PTR_IN: |
| 271 | /* Ensure that any data provided by userspace beyond the known |
| 272 | * struct is zero. Userspace that knows how to use some future |
| 273 | * longer struct will fail here if used with an old kernel and |
| 274 | * non-zero content, making ABI compat/discovery simpler. |
| 275 | */ |
| 276 | if (uattr->len > val_spec->u.ptr.len && |
| 277 | val_spec->zero_trailing && |
| 278 | !uverbs_is_attr_cleared(uattr, len: val_spec->u.ptr.len)) |
| 279 | return -EOPNOTSUPP; |
| 280 | |
| 281 | fallthrough; |
| 282 | case UVERBS_ATTR_TYPE_PTR_OUT: |
| 283 | if (uattr->len < val_spec->u.ptr.min_len || |
| 284 | (!val_spec->zero_trailing && |
| 285 | uattr->len > val_spec->u.ptr.len)) |
| 286 | return -EINVAL; |
| 287 | |
| 288 | if (spec->type != UVERBS_ATTR_TYPE_ENUM_IN && |
| 289 | uattr->attr_data.reserved) |
| 290 | return -EINVAL; |
| 291 | |
| 292 | e->ptr_attr.uattr_idx = uattr - pbundle->uattrs; |
| 293 | e->ptr_attr.len = uattr->len; |
| 294 | |
| 295 | if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(attr: e)) { |
| 296 | void *p; |
| 297 | |
| 298 | p = uverbs_alloc(bundle, size: uattr->len); |
| 299 | if (IS_ERR(ptr: p)) |
| 300 | return PTR_ERR(ptr: p); |
| 301 | |
| 302 | e->ptr_attr.ptr = p; |
| 303 | |
| 304 | if (copy_from_user(to: p, u64_to_user_ptr(uattr->data), |
| 305 | n: uattr->len)) |
| 306 | return -EFAULT; |
| 307 | } else { |
| 308 | e->ptr_attr.data = uattr->data; |
| 309 | } |
| 310 | break; |
| 311 | |
| 312 | case UVERBS_ATTR_TYPE_IDR: |
| 313 | case UVERBS_ATTR_TYPE_FD: |
| 314 | if (uattr->attr_data.reserved) |
| 315 | return -EINVAL; |
| 316 | |
| 317 | if (uattr->len != 0) |
| 318 | return -EINVAL; |
| 319 | |
| 320 | o_attr = &e->obj_attr; |
| 321 | o_attr->attr_elm = attr_uapi; |
| 322 | |
| 323 | /* |
| 324 | * The type of uattr->data is u64 for UVERBS_ATTR_TYPE_IDR and |
| 325 | * s64 for UVERBS_ATTR_TYPE_FD. We can cast the u64 to s64 |
| 326 | * here without caring about truncation as we know that the |
| 327 | * IDR implementation today rejects negative IDs |
| 328 | */ |
| 329 | o_attr->uobject = uverbs_get_uobject_from_file( |
| 330 | object_id: spec->u.obj.obj_type, access: spec->u.obj.access, |
| 331 | id: uattr->data_s64, attrs: bundle); |
| 332 | if (IS_ERR(ptr: o_attr->uobject)) |
| 333 | return PTR_ERR(ptr: o_attr->uobject); |
| 334 | __set_bit(attr_bkey, pbundle->uobj_finalize); |
| 335 | |
| 336 | if (spec->u.obj.access == UVERBS_ACCESS_NEW) { |
| 337 | unsigned int uattr_idx = uattr - pbundle->uattrs; |
| 338 | s64 id = o_attr->uobject->id; |
| 339 | |
| 340 | /* Copy the allocated id to the user-space */ |
| 341 | if (put_user(id, &pbundle->user_attrs[uattr_idx].data)) |
| 342 | return -EFAULT; |
| 343 | } |
| 344 | |
| 345 | break; |
| 346 | |
| 347 | case UVERBS_ATTR_TYPE_RAW_FD: |
| 348 | if (uattr->attr_data.reserved || uattr->len != 0 || |
| 349 | uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX) |
| 350 | return -EINVAL; |
| 351 | /* _uverbs_get_const_signed() is the accessor */ |
| 352 | e->ptr_attr.data = uattr->data_s64; |
| 353 | break; |
| 354 | |
| 355 | case UVERBS_ATTR_TYPE_IDRS_ARRAY: |
| 356 | return uverbs_process_idrs_array(pbundle, attr_uapi, |
| 357 | attr: &e->objs_arr_attr, uattr, |
| 358 | attr_bkey); |
| 359 | default: |
| 360 | return -EOPNOTSUPP; |
| 361 | } |
| 362 | |
| 363 | return 0; |
| 364 | } |
| 365 | |
| 366 | /* |
| 367 | * We search the radix tree with the method prefix and now we want to fast |
| 368 | * search the suffix bits to get a particular attribute pointer. It is not |
| 369 | * totally clear to me if this breaks the radix tree encasulation or not, but |
| 370 | * it uses the iter data to determine if the method iter points at the same |
| 371 | * chunk that will store the attribute, if so it just derefs it directly. By |
| 372 | * construction in most kernel configs the method and attrs will all fit in a |
| 373 | * single radix chunk, so in most cases this will have no search. Other cases |
| 374 | * this falls back to a full search. |
| 375 | */ |
| 376 | static void __rcu **uapi_get_attr_for_method(struct bundle_priv *pbundle, |
| 377 | u32 attr_key) |
| 378 | { |
| 379 | void __rcu **slot; |
| 380 | |
| 381 | if (likely(attr_key < pbundle->radix_slots_len)) { |
| 382 | void *entry; |
| 383 | |
| 384 | slot = pbundle->radix_slots + attr_key; |
| 385 | entry = rcu_dereference_raw(*slot); |
| 386 | if (likely(!radix_tree_is_internal_node(entry) && entry)) |
| 387 | return slot; |
| 388 | } |
| 389 | |
| 390 | return radix_tree_lookup_slot(pbundle->radix, |
| 391 | index: pbundle->method_key | attr_key); |
| 392 | } |
| 393 | |
| 394 | static int uverbs_set_attr(struct bundle_priv *pbundle, |
| 395 | struct ib_uverbs_attr *uattr) |
| 396 | { |
| 397 | u32 attr_key = uapi_key_attr(id: uattr->attr_id); |
| 398 | u32 attr_bkey = uapi_bkey_attr(attr_key); |
| 399 | const struct uverbs_api_attr *attr; |
| 400 | void __rcu **slot; |
| 401 | int ret; |
| 402 | |
| 403 | slot = uapi_get_attr_for_method(pbundle, attr_key); |
| 404 | if (!slot) { |
| 405 | /* |
| 406 | * Kernel does not support the attribute but user-space says it |
| 407 | * is mandatory |
| 408 | */ |
| 409 | if (uattr->flags & UVERBS_ATTR_F_MANDATORY) |
| 410 | return -EPROTONOSUPPORT; |
| 411 | return 0; |
| 412 | } |
| 413 | attr = rcu_dereference_protected(*slot, true); |
| 414 | |
| 415 | /* Reject duplicate attributes from user-space */ |
| 416 | if (test_bit(attr_bkey, pbundle->bundle.attr_present)) |
| 417 | return -EINVAL; |
| 418 | |
| 419 | ret = uverbs_process_attr(pbundle, attr_uapi: attr, uattr, attr_bkey); |
| 420 | if (ret) |
| 421 | return ret; |
| 422 | |
| 423 | __set_bit(attr_bkey, pbundle->bundle.attr_present); |
| 424 | |
| 425 | return 0; |
| 426 | } |
| 427 | |
| 428 | static int ib_uverbs_run_method(struct bundle_priv *pbundle, |
| 429 | unsigned int num_attrs) |
| 430 | { |
| 431 | int (*handler)(struct uverbs_attr_bundle *attrs); |
| 432 | struct uverbs_attr_bundle *bundle = |
| 433 | container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); |
| 434 | size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs); |
| 435 | unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey; |
| 436 | unsigned int i; |
| 437 | int ret; |
| 438 | |
| 439 | /* See uverbs_disassociate_api() */ |
| 440 | handler = srcu_dereference( |
| 441 | pbundle->method_elm->handler, |
| 442 | &pbundle->bundle.ufile->device->disassociate_srcu); |
| 443 | if (!handler) |
| 444 | return -EIO; |
| 445 | |
| 446 | pbundle->uattrs = uverbs_alloc(bundle, size: uattrs_size); |
| 447 | if (IS_ERR(ptr: pbundle->uattrs)) |
| 448 | return PTR_ERR(ptr: pbundle->uattrs); |
| 449 | if (copy_from_user(to: pbundle->uattrs, from: pbundle->user_attrs, n: uattrs_size)) |
| 450 | return -EFAULT; |
| 451 | |
| 452 | for (i = 0; i != num_attrs; i++) { |
| 453 | ret = uverbs_set_attr(pbundle, uattr: &pbundle->uattrs[i]); |
| 454 | if (unlikely(ret)) |
| 455 | return ret; |
| 456 | } |
| 457 | |
| 458 | /* User space did not provide all the mandatory attributes */ |
| 459 | if (unlikely(!bitmap_subset(pbundle->method_elm->attr_mandatory, |
| 460 | pbundle->bundle.attr_present, |
| 461 | pbundle->method_elm->key_bitmap_len))) |
| 462 | return -EINVAL; |
| 463 | |
| 464 | if (pbundle->method_elm->has_udata) |
| 465 | uverbs_fill_udata(bundle, udata: &pbundle->bundle.driver_udata, |
| 466 | attr_in: UVERBS_ATTR_UHW_IN, attr_out: UVERBS_ATTR_UHW_OUT); |
| 467 | else |
| 468 | pbundle->bundle.driver_udata = (struct ib_udata){}; |
| 469 | |
| 470 | if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) { |
| 471 | struct uverbs_obj_attr *destroy_attr = &bundle->attrs[destroy_bkey].obj_attr; |
| 472 | |
| 473 | ret = uobj_destroy(uobj: destroy_attr->uobject, attrs: bundle); |
| 474 | if (ret) |
| 475 | return ret; |
| 476 | __clear_bit(destroy_bkey, pbundle->uobj_finalize); |
| 477 | |
| 478 | ret = handler(bundle); |
| 479 | uobj_put_destroy(uobj: destroy_attr->uobject); |
| 480 | } else { |
| 481 | ret = handler(bundle); |
| 482 | } |
| 483 | |
| 484 | /* |
| 485 | * Until the drivers are revised to use the bundle directly we have to |
| 486 | * assume that the driver wrote to its UHW_OUT and flag userspace |
| 487 | * appropriately. |
| 488 | */ |
| 489 | if (!ret && pbundle->method_elm->has_udata) { |
| 490 | const struct uverbs_attr *attr = |
| 491 | uverbs_attr_get(attrs_bundle: bundle, idx: UVERBS_ATTR_UHW_OUT); |
| 492 | |
| 493 | if (!IS_ERR(ptr: attr)) |
| 494 | ret = uverbs_set_output(bundle, attr); |
| 495 | } |
| 496 | |
| 497 | /* |
| 498 | * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can |
| 499 | * not invoke the method because the request is not supported. No |
| 500 | * other cases should return this code. |
| 501 | */ |
| 502 | if (WARN_ON_ONCE(ret == -EPROTONOSUPPORT)) |
| 503 | return -EINVAL; |
| 504 | |
| 505 | return ret; |
| 506 | } |
| 507 | |
| 508 | static void bundle_destroy(struct bundle_priv *pbundle, bool commit) |
| 509 | { |
| 510 | unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len; |
| 511 | struct uverbs_attr_bundle *bundle = |
| 512 | container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); |
| 513 | struct bundle_alloc_head *memblock; |
| 514 | unsigned int i; |
| 515 | |
| 516 | /* fast path for simple uobjects */ |
| 517 | i = -1; |
| 518 | while ((i = find_next_bit(addr: pbundle->uobj_finalize, size: key_bitmap_len, |
| 519 | offset: i + 1)) < key_bitmap_len) { |
| 520 | struct uverbs_attr *attr = &bundle->attrs[i]; |
| 521 | |
| 522 | uverbs_finalize_object( |
| 523 | uobj: attr->obj_attr.uobject, |
| 524 | access: attr->obj_attr.attr_elm->spec.u.obj.access, |
| 525 | test_bit(i, pbundle->uobj_hw_obj_valid), |
| 526 | commit, attrs: bundle); |
| 527 | } |
| 528 | |
| 529 | i = -1; |
| 530 | while ((i = find_next_bit(addr: pbundle->spec_finalize, size: key_bitmap_len, |
| 531 | offset: i + 1)) < key_bitmap_len) { |
| 532 | struct uverbs_attr *attr = &bundle->attrs[i]; |
| 533 | const struct uverbs_api_attr *attr_uapi; |
| 534 | void __rcu **slot; |
| 535 | |
| 536 | slot = uapi_get_attr_for_method( |
| 537 | pbundle, |
| 538 | attr_key: pbundle->method_key | uapi_bkey_to_key_attr(attr_bkey: i)); |
| 539 | if (WARN_ON(!slot)) |
| 540 | continue; |
| 541 | |
| 542 | attr_uapi = rcu_dereference_protected(*slot, true); |
| 543 | |
| 544 | if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) { |
| 545 | uverbs_free_idrs_array(attr_uapi, attr: &attr->objs_arr_attr, |
| 546 | commit, attrs: bundle); |
| 547 | } |
| 548 | } |
| 549 | |
| 550 | for (memblock = pbundle->allocated_mem; memblock;) { |
| 551 | struct bundle_alloc_head *tmp = memblock; |
| 552 | |
| 553 | memblock = memblock->next; |
| 554 | kvfree(addr: tmp); |
| 555 | } |
| 556 | } |
| 557 | |
| 558 | static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile, |
| 559 | struct ib_uverbs_ioctl_hdr *hdr, |
| 560 | struct ib_uverbs_attr __user *user_attrs) |
| 561 | { |
| 562 | const struct uverbs_api_ioctl_method *method_elm; |
| 563 | struct uverbs_api *uapi = ufile->device->uapi; |
| 564 | struct radix_tree_iter attrs_iter; |
| 565 | struct bundle_priv *pbundle; |
| 566 | struct bundle_priv onstack; |
| 567 | void __rcu **slot; |
| 568 | int ret; |
| 569 | |
| 570 | if (unlikely(hdr->driver_id != uapi->driver_id)) |
| 571 | return -EINVAL; |
| 572 | |
| 573 | slot = radix_tree_iter_lookup( |
| 574 | root: &uapi->radix, iter: &attrs_iter, |
| 575 | index: uapi_key_obj(id: hdr->object_id) | |
| 576 | uapi_key_ioctl_method(id: hdr->method_id)); |
| 577 | if (unlikely(!slot)) |
| 578 | return -EPROTONOSUPPORT; |
| 579 | method_elm = rcu_dereference_protected(*slot, true); |
| 580 | |
| 581 | if (!method_elm->use_stack) { |
| 582 | pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL); |
| 583 | if (!pbundle) |
| 584 | return -ENOMEM; |
| 585 | pbundle->internal_avail = |
| 586 | method_elm->bundle_size - |
| 587 | offsetof(struct bundle_priv, internal_buffer); |
| 588 | pbundle->alloc_head.next = NULL; |
| 589 | pbundle->allocated_mem = container_of(&pbundle->alloc_head, |
| 590 | struct bundle_alloc_head, hdr); |
| 591 | } else { |
| 592 | pbundle = &onstack; |
| 593 | pbundle->internal_avail = sizeof(pbundle->internal_buffer); |
| 594 | pbundle->allocated_mem = NULL; |
| 595 | } |
| 596 | |
| 597 | /* Space for the pbundle->bundle.attrs flex array */ |
| 598 | pbundle->method_elm = method_elm; |
| 599 | pbundle->method_key = attrs_iter.index; |
| 600 | pbundle->bundle.ufile = ufile; |
| 601 | pbundle->bundle.context = NULL; /* only valid if bundle has uobject */ |
| 602 | pbundle->radix = &uapi->radix; |
| 603 | pbundle->radix_slots = slot; |
| 604 | pbundle->radix_slots_len = radix_tree_chunk_size(iter: &attrs_iter); |
| 605 | pbundle->user_attrs = user_attrs; |
| 606 | |
| 607 | pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len * |
| 608 | sizeof(*container_of(&pbundle->bundle, |
| 609 | struct uverbs_attr_bundle, hdr)->attrs), |
| 610 | sizeof(*pbundle->internal_buffer)); |
| 611 | memset(pbundle->bundle.attr_present, 0, |
| 612 | sizeof(pbundle->bundle.attr_present)); |
| 613 | memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize)); |
| 614 | memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize)); |
| 615 | memset(pbundle->uobj_hw_obj_valid, 0, |
| 616 | sizeof(pbundle->uobj_hw_obj_valid)); |
| 617 | |
| 618 | ret = ib_uverbs_run_method(pbundle, num_attrs: hdr->num_attrs); |
| 619 | bundle_destroy(pbundle, commit: ret == 0); |
| 620 | return ret; |
| 621 | } |
| 622 | |
| 623 | long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 624 | { |
| 625 | struct ib_uverbs_file *file = filp->private_data; |
| 626 | struct ib_uverbs_ioctl_hdr __user *user_hdr = |
| 627 | (struct ib_uverbs_ioctl_hdr __user *)arg; |
| 628 | struct ib_uverbs_ioctl_hdr hdr; |
| 629 | int srcu_key; |
| 630 | int err; |
| 631 | |
| 632 | if (unlikely(cmd != RDMA_VERBS_IOCTL)) |
| 633 | return -ENOIOCTLCMD; |
| 634 | |
| 635 | err = copy_from_user(to: &hdr, from: user_hdr, n: sizeof(hdr)); |
| 636 | if (err) |
| 637 | return -EFAULT; |
| 638 | |
| 639 | if (hdr.length > PAGE_SIZE || |
| 640 | hdr.length != struct_size(&hdr, attrs, hdr.num_attrs)) |
| 641 | return -EINVAL; |
| 642 | |
| 643 | if (hdr.reserved1 || hdr.reserved2) |
| 644 | return -EPROTONOSUPPORT; |
| 645 | |
| 646 | srcu_key = srcu_read_lock(ssp: &file->device->disassociate_srcu); |
| 647 | err = ib_uverbs_cmd_verbs(ufile: file, hdr: &hdr, user_attrs: user_hdr->attrs); |
| 648 | srcu_read_unlock(ssp: &file->device->disassociate_srcu, idx: srcu_key); |
| 649 | return err; |
| 650 | } |
| 651 | |
| 652 | int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, |
| 653 | size_t idx, u64 allowed_bits) |
| 654 | { |
| 655 | const struct uverbs_attr *attr; |
| 656 | u64 flags; |
| 657 | |
| 658 | attr = uverbs_attr_get(attrs_bundle, idx); |
| 659 | /* Missing attribute means 0 flags */ |
| 660 | if (IS_ERR(ptr: attr)) { |
| 661 | *to = 0; |
| 662 | return 0; |
| 663 | } |
| 664 | |
| 665 | /* |
| 666 | * New userspace code should use 8 bytes to pass flags, but we |
| 667 | * transparently support old userspaces that were using 4 bytes as |
| 668 | * well. |
| 669 | */ |
| 670 | if (attr->ptr_attr.len == 8) |
| 671 | flags = attr->ptr_attr.data; |
| 672 | else if (attr->ptr_attr.len == 4) |
| 673 | flags = *(u32 *)&attr->ptr_attr.data; |
| 674 | else |
| 675 | return -EINVAL; |
| 676 | |
| 677 | if (flags & ~allowed_bits) |
| 678 | return -EINVAL; |
| 679 | |
| 680 | *to = flags; |
| 681 | return 0; |
| 682 | } |
| 683 | EXPORT_SYMBOL(uverbs_get_flags64); |
| 684 | |
| 685 | int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle, |
| 686 | size_t idx, u64 allowed_bits) |
| 687 | { |
| 688 | u64 flags; |
| 689 | int ret; |
| 690 | |
| 691 | ret = uverbs_get_flags64(&flags, attrs_bundle, idx, allowed_bits); |
| 692 | if (ret) |
| 693 | return ret; |
| 694 | |
| 695 | if (flags > U32_MAX) |
| 696 | return -EINVAL; |
| 697 | *to = flags; |
| 698 | |
| 699 | return 0; |
| 700 | } |
| 701 | EXPORT_SYMBOL(uverbs_get_flags32); |
| 702 | |
| 703 | /* |
| 704 | * Fill a ib_udata struct (core or uhw) using the given attribute IDs. |
| 705 | * This is primarily used to convert the UVERBS_ATTR_UHW() into the |
| 706 | * ib_udata format used by the drivers. |
| 707 | */ |
| 708 | void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, |
| 709 | struct ib_udata *udata, unsigned int attr_in, |
| 710 | unsigned int attr_out) |
| 711 | { |
| 712 | struct bundle_priv *pbundle = |
| 713 | container_of(&bundle->hdr, struct bundle_priv, bundle); |
| 714 | struct uverbs_attr_bundle *bundle_aux = |
| 715 | container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr); |
| 716 | const struct uverbs_attr *in = |
| 717 | uverbs_attr_get(attrs_bundle: bundle_aux, idx: attr_in); |
| 718 | const struct uverbs_attr *out = |
| 719 | uverbs_attr_get(attrs_bundle: bundle_aux, idx: attr_out); |
| 720 | |
| 721 | if (!IS_ERR(ptr: in)) { |
| 722 | udata->inlen = in->ptr_attr.len; |
| 723 | if (uverbs_attr_ptr_is_inline(attr: in)) |
| 724 | udata->inbuf = |
| 725 | &pbundle->user_attrs[in->ptr_attr.uattr_idx] |
| 726 | .data; |
| 727 | else |
| 728 | udata->inbuf = u64_to_user_ptr(in->ptr_attr.data); |
| 729 | } else { |
| 730 | udata->inbuf = NULL; |
| 731 | udata->inlen = 0; |
| 732 | } |
| 733 | |
| 734 | if (!IS_ERR(ptr: out)) { |
| 735 | udata->outbuf = u64_to_user_ptr(out->ptr_attr.data); |
| 736 | udata->outlen = out->ptr_attr.len; |
| 737 | } else { |
| 738 | udata->outbuf = NULL; |
| 739 | udata->outlen = 0; |
| 740 | } |
| 741 | } |
| 742 | |
| 743 | int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, |
| 744 | const void *from, size_t size) |
| 745 | { |
| 746 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle: bundle, idx); |
| 747 | size_t min_size; |
| 748 | |
| 749 | if (IS_ERR(ptr: attr)) |
| 750 | return PTR_ERR(ptr: attr); |
| 751 | |
| 752 | min_size = min_t(size_t, attr->ptr_attr.len, size); |
| 753 | if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, n: min_size)) |
| 754 | return -EFAULT; |
| 755 | |
| 756 | return uverbs_set_output(bundle, attr); |
| 757 | } |
| 758 | EXPORT_SYMBOL(uverbs_copy_to); |
| 759 | |
| 760 | |
| 761 | /* |
| 762 | * This is only used if the caller has directly used copy_to_use to write the |
| 763 | * data. It signals to user space that the buffer is filled in. |
| 764 | */ |
| 765 | int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) |
| 766 | { |
| 767 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle: bundle, idx); |
| 768 | |
| 769 | if (IS_ERR(ptr: attr)) |
| 770 | return PTR_ERR(ptr: attr); |
| 771 | |
| 772 | return uverbs_set_output(bundle, attr); |
| 773 | } |
| 774 | |
| 775 | int _uverbs_get_const_signed(s64 *to, |
| 776 | const struct uverbs_attr_bundle *attrs_bundle, |
| 777 | size_t idx, s64 lower_bound, u64 upper_bound, |
| 778 | s64 *def_val) |
| 779 | { |
| 780 | const struct uverbs_attr *attr; |
| 781 | |
| 782 | attr = uverbs_attr_get(attrs_bundle, idx); |
| 783 | if (IS_ERR(ptr: attr)) { |
| 784 | if ((PTR_ERR(ptr: attr) != -ENOENT) || !def_val) |
| 785 | return PTR_ERR(ptr: attr); |
| 786 | |
| 787 | *to = *def_val; |
| 788 | } else { |
| 789 | *to = attr->ptr_attr.data; |
| 790 | } |
| 791 | |
| 792 | if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound)) |
| 793 | return -EINVAL; |
| 794 | |
| 795 | return 0; |
| 796 | } |
| 797 | EXPORT_SYMBOL(_uverbs_get_const_signed); |
| 798 | |
| 799 | int _uverbs_get_const_unsigned(u64 *to, |
| 800 | const struct uverbs_attr_bundle *attrs_bundle, |
| 801 | size_t idx, u64 upper_bound, u64 *def_val) |
| 802 | { |
| 803 | const struct uverbs_attr *attr; |
| 804 | |
| 805 | attr = uverbs_attr_get(attrs_bundle, idx); |
| 806 | if (IS_ERR(ptr: attr)) { |
| 807 | if ((PTR_ERR(ptr: attr) != -ENOENT) || !def_val) |
| 808 | return PTR_ERR(ptr: attr); |
| 809 | |
| 810 | *to = *def_val; |
| 811 | } else { |
| 812 | *to = attr->ptr_attr.data; |
| 813 | } |
| 814 | |
| 815 | if (*to > upper_bound) |
| 816 | return -EINVAL; |
| 817 | |
| 818 | return 0; |
| 819 | } |
| 820 | EXPORT_SYMBOL(_uverbs_get_const_unsigned); |
| 821 | |
| 822 | int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, |
| 823 | size_t idx, const void *from, size_t size) |
| 824 | { |
| 825 | const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle: bundle, idx); |
| 826 | |
| 827 | if (IS_ERR(ptr: attr)) |
| 828 | return PTR_ERR(ptr: attr); |
| 829 | |
| 830 | if (size < attr->ptr_attr.len) { |
| 831 | if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, |
| 832 | n: attr->ptr_attr.len - size)) |
| 833 | return -EFAULT; |
| 834 | } |
| 835 | return uverbs_copy_to(bundle, idx, from, size); |
| 836 | } |
| 837 | EXPORT_SYMBOL(uverbs_copy_to_struct_or_zero); |
| 838 | |
| 839 | /* Once called an abort will call through to the type's destroy_hw() */ |
| 840 | void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle, |
| 841 | u16 idx) |
| 842 | { |
| 843 | struct bundle_priv *pbundle = |
| 844 | container_of(&bundle->hdr, struct bundle_priv, bundle); |
| 845 | |
| 846 | __set_bit(uapi_bkey_attr(uapi_key_attr(idx)), |
| 847 | pbundle->uobj_hw_obj_valid); |
| 848 | } |
| 849 | EXPORT_SYMBOL(uverbs_finalize_uobj_create); |
| 850 | |