// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

/*
 * DRM operations:
 */

static struct device_node *etnaviv_of_first_available_node(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (of_device_is_available(np))
			return np;
	}

	return NULL;
}

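/*
 * Initialize every GPU core that was bound to this DRM device. A core
 * that fails to initialize is dropped from the pipe array so it appears
 * as absent to userspace instead of being left half-initialized.
 */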
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

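/*
 * Per-open file state: allocate a context, track it in the active
 * contexts xarray, give it its own MMU context and one scheduler
 * entity per present GPU pipe.
 */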
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int ret, i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
			      xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
	if (ret < 0)
		goto out_free;

	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
					      priv->cmdbuf_suballoc);
	if (!ctx->mmu) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];
		struct drm_gpu_scheduler *sched;

		if (gpu) {
			sched = &gpu->sched;
			drm_sched_entity_init(&ctx->sched_entity[i],
					      DRM_SCHED_PRIORITY_NORMAL, &sched,
					      1, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;

out_free:
	kfree(ctx);
	return ret;
}

static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu)
			drm_sched_entity_destroy(&ctx->sched_entity[i]);
	}

	etnaviv_iommu_context_put(ctx->mmu);

	xa_erase(&priv->active_contexts, ctx->id);

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to avoid an MMU context switch just now and elevate
	 * the refcount of the current context to avoid it disappearing from
	 * under our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	if (!mmu_context)
		return 0;

	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	etnaviv_iommu_context_put(mmu_context);

	return 0;
}

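/*
 * Dump the kernel ring buffer: a header line with its virtual and
 * physical address plus remaining free space, followed by the buffer
 * contents printed as rows of four 32-bit words.
 */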
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}

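/*
 * debugfs dispatch helpers: show_unlocked() calls the entry's show
 * function once for the whole device, show_each_gpu() calls it once for
 * every present GPU pipe.
 */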
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};

static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	drm_show_memory_stats(p, file);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

static const struct drm_driver etnaviv_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.open = etnaviv_open,
	.postclose = etnaviv_postclose,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = etnaviv_debugfs_init,
#endif
	.show_fdinfo = etnaviv_show_fdinfo,
	.ioctls = etnaviv_ioctls,
	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
	.fops = &fops,
	.name = "etnaviv",
	.desc = "etnaviv DRM",
	.major = 1,
	.minor = 4,
};

/*
 * Platform driver:
 */
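/*
 * Master bind: create the DRM device, set up the shared driver state
 * (contexts xarray, GEM list, cmdbuf suballocator), bind all GPU core
 * components and register the device with DRM.
 */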
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	dma_set_max_seg_size(dev, SZ_2G);

	xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(dev)) {
		priv->shm_gfp_mask |= GFP_DMA32;
		priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
	}

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	mutex_destroy(&priv->gem_lock);
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	xa_destroy(&priv->active_contexts);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

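/*
 * Probe of the virtual etnaviv platform device: build a component match
 * from either the available "vivante,gc" DT nodes or the names passed in
 * platform data, constrain the DMA masks, then register the component
 * master.
 */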
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *first_node = NULL;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			drm_of_component_match_add(dev, &match,
						   component_compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, component_compare_dev_name, names[i]);
	}

	/*
	 * PTA and MTLB can have 40 bit base addresses, but
	 * unfortunately, an entry in the MTLB can only point to a
	 * 32 bit base address of a STLB. Moreover, to initialize the
	 * MMU we need a command buffer with a 32 bit address because
	 * without an MMU there is only an identity mapping between
	 * the internal 32 bit addresses and the bus addresses.
	 *
	 * To make things easy, we set the dma_coherent_mask to 32
	 * bit to make sure we are allocating the command buffers and
	 * TLBs in the lower 4 GiB address space.
	 */
	if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
		dev_dbg(dev, "No suitable DMA available\n");
		return -ENODEV;
	}

	/*
	 * Apply the same DMA configuration to the virtual etnaviv
	 * device as the GPU we found. This assumes that all Vivante
	 * GPUs in the system share the same DMA constraints.
	 */
	first_node = etnaviv_of_first_available_node();
	if (first_node) {
		of_dma_configure(dev, first_node, true);
		of_node_put(first_node);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static void etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);
}

static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
	},
};

static int etnaviv_create_platform_device(const char *name,
					  struct platform_device **ppdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ret;
	}

	*ppdev = pdev;

	return 0;
}

static void etnaviv_destroy_platform_device(struct platform_device **ppdev)
{
	struct platform_device *pdev = *ppdev;

	if (!pdev)
		return;

	platform_device_unregister(pdev);

	*ppdev = NULL;
}

static struct platform_device *etnaviv_drm;

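/*
 * Module init: register the GPU core driver and the etnaviv platform
 * driver, then instantiate the virtual DRM platform device if the DT
 * contains at least one available Vivante GPU node.
 */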
static int __init etnaviv_init(void)
{
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	np = etnaviv_of_first_available_node();
	if (np) {
		of_node_put(np);

		ret = etnaviv_create_platform_device("etnaviv", &etnaviv_drm);
		if (ret)
			goto unregister_platform_driver;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	etnaviv_destroy_platform_device(&etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");