// SPDX-License-Identifier: MIT
/*
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
#include <linux/module.h>

#include <asm/smp.h>

#include "gem/i915_gem_dmabuf.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

MODULE_IMPORT_NS("DMA_BUF");

I915_SELFTEST_DECLARE(static bool force_different_devices;)

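/* Recover our GEM object from the dma-buf's private data. */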
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

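/*
 * map_dma_buf callback: give the importer its own copy of the object's
 * scatterlist, mapped for DMA to the attaching device. Each importer thus
 * gets an independent mapping of the same pages, which were pinned when
 * the attachment was created.
 */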
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
	struct sg_table *sgt;
	struct scatterlist *src, *dst;
	int ret, i;

	/*
	 * Make a copy of the object's sgt, so that we can make an independent
	 * mapping.
	 */
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(sgt, obj->mm.pages->orig_nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	dst = sgt->sgl;
	for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->orig_nents, i) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
	}

	ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return sgt;

err_free_sg:
	sg_free_table(sgt);
err_free:
	kfree(sgt);
err:
	return ERR_PTR(ret);
}

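/*
 * vmap callback: pin the object's pages into a contiguous, write-back
 * cached kernel mapping for dma_buf_vmap() users.
 */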
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
				struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

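/* vunmap callback: flush CPU writes, then drop the pin taken in vmap. */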
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf,
				   struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

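/*
 * mmap callback: objects that may live in local memory take the generic
 * GEM PRIME path, while shmem-backed objects are mapped through their
 * backing file directly.
 */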
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (HAS_LMEM(i915))
		return drm_gem_prime_mmap(&obj->base, vma);

	if (!obj->base.filp)
		return -ENODEV;

	ret = vfs_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->base.filp);

	return 0;
}

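/*
 * begin_cpu_access callback: move the object to the CPU domain so that
 * importer CPU access through an existing vmap/mmap sees coherent data.
 */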
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_cpu_domain(obj, write);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

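/*
 * end_cpu_access callback: move the object back to the GTT domain,
 * flushing any CPU writes so they are visible to later device access.
 */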
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_gtt_domain(obj, false);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

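/*
 * attach callback: dma-buf sharing requires the backing store to live in
 * system memory, so migrate the object to SMEM and keep its pages pinned
 * for the lifetime of the attachment.
 */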
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
	struct i915_gem_ww_ctx ww;
	int err;

	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EOPNOTSUPP;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, 0);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
	}

	return err;
}

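/* detach callback: drop the pin taken in i915_gem_dmabuf_attach(). */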
static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

	i915_gem_object_unpin_pages(obj);
}

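/* Exporter-side dma-buf operations for i915-owned objects. */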
static const struct dma_buf_ops i915_dmabuf_ops = {
	.attach = i915_gem_dmabuf_attach,
	.detach = i915_gem_dmabuf_detach,
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

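/**
 * i915_gem_prime_export - export a GEM object as a dma-buf
 * @gem_obj: GEM object to export
 * @flags: flags for the dma-buf file, e.g. O_CLOEXEC
 *
 * Wraps @gem_obj in a dma-buf using &i915_dmabuf_ops, sharing the
 * object's reservation object so implicit fencing keeps working across
 * the export boundary. Backends may veto the export through the optional
 * dmabuf_export hook in &drm_i915_gem_object_ops.
 *
 * Returns: the new dma-buf on success, or an ERR_PTR on failure.
 */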
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);

		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

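/*
 * get_pages backend for imported objects: map the dma-buf attachment and
 * adopt the resulting sg_table as the object's backing store.
 */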
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *sgt;

	assert_object_held(obj);

	sgt = dma_buf_map_attachment(obj->base.import_attach,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/*
	 * DG1 is special here since it still snoops transactions even with
	 * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We
	 * might need to revisit this as we add new discrete platforms.
	 *
	 * XXX: Consider doing a vmap flush or something, where possible.
	 * Currently we just do a heavy handed wbinvd_on_all_cpus() here since
	 * the underlying sg_table might not even point to struct pages, so we
	 * can't just call drm_clflush_sg or similar, like we do elsewhere in
	 * the driver.
	 */
	if (i915_gem_object_can_bypass_llc(obj) ||
	    (!HAS_LLC(i915) && !IS_DG1(i915)))
		wbinvd_on_all_cpus();

	__i915_gem_object_set_pages(obj, sgt);

	return 0;
}

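/* put_pages backend for imported objects: unmap the attachment again. */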
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *sgt)
{
	dma_buf_unmap_attachment(obj->base.import_attach, sgt,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

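/**
 * i915_gem_prime_import - import a dma-buf as a GEM object
 * @dev: DRM device to import into
 * @dma_buf: dma-buf to import
 *
 * Re-importing one of our own dma-bufs on the same device is
 * short-circuited to taking a reference on the underlying GEM object.
 * Foreign buffers are attached to and wrapped in a new object backed by
 * &i915_gem_object_dmabuf_ops, sharing the dma-buf's reservation object.
 *
 * Returns: the GEM object on success, or an ERR_PTR on failure.
 */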
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev &&
		    !I915_SELFTEST_ONLY(force_different_devices)) {
			/*
			 * Importing a dma-buf exported from our own GEM
			 * object increases the refcount on the GEM object
			 * itself instead of the f_count of the dma-buf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	if (i915_gem_object_size_2big(dma_buf->size))
		return ERR_PTR(-E2BIG);

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (!obj) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/*
	 * We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif