// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/cleanup.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <drm/drm_print.h>
#include <drm/panthor_drm.h>

#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_mmu.h"

#ifdef CONFIG_DEBUG_FS
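/* Initialize the debugfs list node so the BO can later be linked into
 * the device-wide GEM list.
 */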
static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo)
{
	INIT_LIST_HEAD(&bo->debugfs.node);
}

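/* Record which process created the BO and link it into the device-wide
 * list walked by the debugfs GEM dump.
 */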
static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo)
{
	struct panthor_device *ptdev = container_of(bo->base.base.dev,
						    struct panthor_device, base);

	bo->debugfs.creator.tgid = current->group_leader->pid;
	get_task_comm(bo->debugfs.creator.process_name, current->group_leader);

	mutex_lock(&ptdev->gems.lock);
	list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
	mutex_unlock(&ptdev->gems.lock);
}

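/* Unlink the BO from the device-wide list. A BO that was never added
 * still has an empty (self-pointing) list node and is skipped.
 */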
static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo)
{
	struct panthor_device *ptdev = container_of(bo->base.base.dev,
						    struct panthor_device, base);

	if (list_empty(&bo->debugfs.node))
		return;

	mutex_lock(&ptdev->gems.lock);
	list_del_init(&bo->debugfs.node);
	mutex_unlock(&ptdev->gems.lock);
}

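/* Set the usage flags reported through debugfs and make the BO visible
 * in the device-wide GEM list.
 */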
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags)
{
	bo->debugfs.flags = usage_flags;
	panthor_gem_debugfs_bo_add(bo);
}
#else
static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {}
static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {}
static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {}
#endif

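/* Release everything the BO owns: its debugfs entry, its label, the
 * shmem backing storage, and the reference held on the exclusive VM's
 * root GEM, if any.
 */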
static void panthor_gem_free_object(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	struct drm_gem_object *vm_root_gem = bo->exclusive_vm_root_gem;

	panthor_gem_debugfs_bo_rm(bo);

	/*
	 * Label might have been allocated with kstrdup_const(),
	 * we need to take that into account when freeing the memory
	 */
	kfree_const(bo->label.str);

	mutex_destroy(&bo->label.lock);

	drm_gem_free_mmap_offset(&bo->base.base);
	drm_gem_shmem_free(&bo->base);
	drm_gem_object_put(vm_root_gem);
}

/**
 * panthor_kernel_bo_destroy() - Destroy a kernel buffer object
 * @bo: Kernel buffer object to destroy. If NULL or an ERR_PTR(), the destruction
 * is skipped.
 */
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
{
	struct panthor_vm *vm;

	if (IS_ERR_OR_NULL(bo))
		return;

	vm = bo->vm;
	panthor_kernel_bo_vunmap(bo);

	drm_WARN_ON(bo->obj->dev,
		    to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm));
	panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
	panthor_vm_free_va(vm, &bo->va_node);
	drm_gem_object_put(bo->obj);
	panthor_vm_put(vm);
	kfree(bo);
}

/**
 * panthor_kernel_bo_create() - Create and map a GEM object to a VM
 * @ptdev: Device.
 * @vm: VM to map the GEM to. Must not be NULL: the function warns and
 * returns -EINVAL if it is.
 * @size: Size of the buffer object.
 * @bo_flags: Combination of drm_panthor_bo_flags flags.
 * @vm_map_flags: Combination of drm_panthor_vm_bind_op_flags (only those
 * that are related to map operations).
 * @gpu_va: GPU address assigned when mapping to the VM.
 * If gpu_va == PANTHOR_VM_KERNEL_AUTO_VA, the virtual address will be
 * automatically allocated.
 * @name: Descriptive label of the BO's contents.
 *
 * Return: A valid pointer in case of success, an ERR_PTR() otherwise.
 */
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
			 size_t size, u32 bo_flags, u32 vm_map_flags,
			 u64 gpu_va, const char *name)
{
	struct drm_gem_shmem_object *obj;
	struct panthor_kernel_bo *kbo;
	struct panthor_gem_object *bo;
	u32 debug_flags = PANTHOR_DEBUGFS_GEM_USAGE_FLAG_KERNEL;
	int ret;

	if (drm_WARN_ON(&ptdev->base, !vm))
		return ERR_PTR(-EINVAL);

	kbo = kzalloc(sizeof(*kbo), GFP_KERNEL);
	if (!kbo)
		return ERR_PTR(-ENOMEM);

	obj = drm_gem_shmem_create(&ptdev->base, size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free_bo;
	}

	bo = to_panthor_bo(&obj->base);
	kbo->obj = &obj->base;
	bo->flags = bo_flags;
	bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
	drm_gem_object_get(bo->exclusive_vm_root_gem);
	bo->base.base.resv = bo->exclusive_vm_root_gem->resv;

	if (vm == panthor_fw_vm(ptdev))
		debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;

	panthor_gem_kernel_bo_set_label(kbo, name);
	panthor_gem_debugfs_set_usage_flags(to_panthor_bo(kbo->obj), debug_flags);

	/* The system and GPU MMU page size might differ, which becomes a
	 * problem for FW sections that need to be mapped at explicit address
	 * since our PAGE_SIZE alignment might cover a VA range that's
	 * expected to be used for another section.
	 * Make sure we never map more than we need.
	 */
	size = ALIGN(size, panthor_vm_page_size(vm));
	ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
	if (ret)
		goto err_put_obj;

	ret = panthor_vm_map_bo_range(vm, bo, 0, size, kbo->va_node.start, vm_map_flags);
	if (ret)
		goto err_free_va;

	kbo->vm = panthor_vm_get(vm);
	return kbo;

err_free_va:
	panthor_vm_free_va(vm, &kbo->va_node);

err_put_obj:
	drm_gem_object_put(&obj->base);

err_free_bo:
	kfree(kbo);
	return ERR_PTR(ret);
}

static struct dma_buf *
panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	/* We can't export GEMs that have an exclusive VM. */
	if (to_panthor_bo(obj)->exclusive_vm_root_gem)
		return ERR_PTR(-EINVAL);

	return drm_gem_prime_export(obj, flags);
}

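/* Report residency for memory accounting (e.g. fdinfo stats): a BO
 * counts as resident once it has backing pages or is an imported
 * dma-buf.
 */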
static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	enum drm_gem_object_status res = 0;

	if (drm_gem_is_imported(&bo->base.base) || bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	return res;
}

static const struct drm_gem_object_funcs panthor_gem_funcs = {
	.free = panthor_gem_free_object,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panthor_gem_status,
	.export = panthor_gem_prime_export,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panthor_gem_create_object - Implementation of driver->gem_create_object.
 * @ddev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t size)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.base.funcs = &panthor_gem_funcs;
	obj->base.map_wc = !ptdev->coherent;
	mutex_init(&obj->label.lock);

	panthor_gem_debugfs_bo_init(obj);

	return &obj->base.base;
}

/**
 * panthor_gem_create_with_handle() - Create a GEM object and attach it to a handle.
 * @file: DRM file.
 * @ddev: DRM device.
 * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared.
 * @size: Size of the GEM object to allocate.
 * @flags: Combination of drm_panthor_bo_flags flags.
 * @handle: Pointer holding the handle pointing to the new GEM object.
 *
 * Return: Zero on success
 */
int
panthor_gem_create_with_handle(struct drm_file *file,
			       struct drm_device *ddev,
			       struct panthor_vm *exclusive_vm,
			       u64 *size, u32 flags, u32 *handle)
{
	int ret;
	struct drm_gem_shmem_object *shmem;
	struct panthor_gem_object *bo;

	shmem = drm_gem_shmem_create(ddev, *size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	bo = to_panthor_bo(&shmem->base);
	bo->flags = flags;

	if (exclusive_vm) {
		bo->exclusive_vm_root_gem = panthor_vm_root_gem(exclusive_vm);
		drm_gem_object_get(bo->exclusive_vm_root_gem);
		bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
	}

	panthor_gem_debugfs_set_usage_flags(bo, 0);

	/* If this is a write-combine mapping, we query the sgt to force a CPU
	 * cache flush (dma_map_sgtable() is called when the sgt is created).
	 * This ensures the zero-ing is visible to any uncached mapping created
	 * by vmap/mmap.
	 * FIXME: Ideally this should be done when pages are allocated, not at
	 * BO creation time.
	 */
	if (shmem->map_wc) {
		struct sg_table *sgt;

		sgt = drm_gem_shmem_get_pages_sgt(shmem);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto out_put_gem;
		}
	}

	/*
	 * Register the object in the file's handle table; the returned
	 * handle is the ID user space uses to refer to the object.
	 */
	ret = drm_gem_handle_create(file, &shmem->base, handle);
	if (!ret)
		*size = bo->base.base.size;

out_put_gem:
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

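/**
 * panthor_gem_bo_set_label() - Set a GEM object's label
 * @obj: The GEM object to label.
 * @label: The label to assign. Ownership of the string is transferred to
 * the object, which releases it with kfree_const() when it is replaced
 * or the object is destroyed.
 */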
void
panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label)
{
	struct panthor_gem_object *bo = to_panthor_bo(obj);
	const char *old_label;

	scoped_guard(mutex, &bo->label.lock) {
		old_label = bo->label.str;
		bo->label.str = label;
	}

	kfree_const(old_label);
}

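/**
 * panthor_gem_kernel_bo_set_label() - Set a kernel-only GEM object's label
 * @bo: The kernel BO to label.
 * @label: The label to assign. A private copy is made with kstrdup_const(),
 * so the caller keeps ownership of the string it passes in.
 */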
void
panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label)
{
	const char *str;

	/* We should never attempt labelling a UM-exposed GEM object */
	if (drm_WARN_ON(bo->obj->dev, bo->obj->handle_count > 0))
		return;

	if (!label)
		return;

	str = kstrdup_const(label, GFP_KERNEL);
	if (!str) {
		/* Failing to allocate memory for a label isn't a fatal condition */
		drm_warn(bo->obj->dev, "Not enough memory to allocate BO label");
		return;
	}

	panthor_gem_bo_set_label(bo->obj, str);
}

#ifdef CONFIG_DEBUG_FS
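/* Running totals accumulated while walking the device's GEM list. */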
struct gem_size_totals {
	size_t size;
	size_t resident;
	size_t reclaimable;
};

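/* Print a legend mapping flag names to their bit values, so the raw
 * state/usage columns of the BO dump can be decoded.
 */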
static void panthor_gem_debugfs_print_flag_names(struct seq_file *m)
{
	int len;
	int i;

	static const char * const gem_state_flags_names[] = {
		[PANTHOR_DEBUGFS_GEM_STATE_IMPORTED_BIT] = "imported",
		[PANTHOR_DEBUGFS_GEM_STATE_EXPORTED_BIT] = "exported",
	};

	static const char * const gem_usage_flags_names[] = {
		[PANTHOR_DEBUGFS_GEM_USAGE_KERNEL_BIT] = "kernel",
		[PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT] = "fw-mapped",
	};

	seq_puts(m, "GEM state flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
		if (!gem_state_flags_names[i])
			continue;
		seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i],
			   (u32)BIT(i), (i < len - 1) ? ", " : "\n");
	}

	seq_puts(m, "GEM usage flags: ");
	for (i = 0, len = ARRAY_SIZE(gem_usage_flags_names); i < len; i++) {
		if (!gem_usage_flags_names[i])
			continue;
		seq_printf(m, "%s (0x%x)%s", gem_usage_flags_names[i],
			   (u32)BIT(i), (i < len - 1) ? ", " : "\n\n");
	}
}

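/* Print one row of the BO dump and add the BO's sizes to the running
 * totals. BOs whose refcount already dropped to zero are being torn
 * down and are skipped.
 */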
static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo,
					 struct seq_file *m,
					 struct gem_size_totals *totals)
{
	unsigned int refcount = kref_read(&bo->base.base.refcount);
	char creator_info[32] = {};
	size_t resident_size;
	u32 gem_usage_flags = bo->debugfs.flags;
	u32 gem_state_flags = 0;

	/* Skip BOs being destroyed. */
	if (!refcount)
		return;

	resident_size = bo->base.pages ? bo->base.base.size : 0;

	snprintf(creator_info, sizeof(creator_info),
		 "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
	seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
		   creator_info,
		   bo->base.base.name,
		   refcount,
		   bo->base.base.size,
		   resident_size,
		   drm_vma_node_start(&bo->base.base.vma_node));

	if (bo->base.base.import_attach)
		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
	if (bo->base.base.dma_buf)
		gem_state_flags |= PANTHOR_DEBUGFS_GEM_STATE_FLAG_EXPORTED;

	seq_printf(m, "0x%-8x 0x%-10x", gem_state_flags, gem_usage_flags);

	scoped_guard(mutex, &bo->label.lock) {
		seq_printf(m, "%s\n", bo->label.str ? : "");
	}

	totals->size += bo->base.base.size;
	totals->resident += resident_size;
	if (bo->base.madv > 0)
		totals->reclaimable += resident_size;
}

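/**
 * panthor_gem_debugfs_print_bos() - Dump all GEM objects of a device
 * @ptdev: Device whose GEM list is dumped.
 * @m: seq_file to print to.
 */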
void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
				   struct seq_file *m)
{
	struct gem_size_totals totals = {0};
	struct panthor_gem_object *bo;

	panthor_gem_debugfs_print_flag_names(m);

	seq_puts(m, "created-by                      global-name     refcount        size            resident-size   file-offset       state      usage       label\n");
	seq_puts(m, "----------------------------------------------------------------------------------------------------------------------------------------------\n");

	scoped_guard(mutex, &ptdev->gems.lock) {
		list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) {
			panthor_gem_debugfs_bo_print(bo, m, &totals);
		}
	}

	seq_puts(m, "==============================================================================================================================================\n");
	seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
		   totals.size, totals.resident, totals.reclaimable);
}
#endif