// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

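/*
 * A GEM object is backed by an array of pages: either allocated locally
 * and shared with the backend, ballooned pages used to map grant
 * references provided by the backend, or pages of an imported PRIME
 * buffer.
 */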
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* scatter-gather table of an imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

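/* Allocate the array of page pointers needed to cover buf_size bytes. */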
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

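/* Free the array of page pointers, not the pages themselves. */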
static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

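/*
 * Map the whole buffer into the caller's VMA by inserting all backing
 * pages up front, so no .fault handling is needed later.
 */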
static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * the vm_pgoff (used as a fake buffer offset by DRM) to 0 as we
	 * want to map the whole buffer.
	 */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;

	/*
	 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would be called on CPU
	 * access to the VMA. For GPUs this isn't the case, because the CPU
	 * doesn't touch the memory. Insert all pages now, so both CPU and
	 * GPU are happy.
	 *
	 * FIXME: as we insert all the pages now, no .fault handler should
	 * ever be called, so don't provide one.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

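/* GEM object callbacks installed on every object created by this driver. */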
static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_free_object_unlocked,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

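/* Allocate and initialize a bare GEM object of the given size. */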
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

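/*
 * Create a GEM object and its backing pages: ballooned pages for mapping
 * backend-provided grant references if the backend allocates the buffer,
 * or locally allocated pages to be shared with the backend otherwise.
 */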
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate the array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

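/* Create a GEM object of the given size and return its base object. */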
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

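/* Free a GEM object together with its backing or imported pages. */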
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

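/* Return the array of pages backing the given GEM object. */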
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

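/* Build a scatter-gather table from the object's backing pages. */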
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

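/*
 * Import a PRIME buffer: wrap the dma-buf pages into a GEM object and
 * tell the backend about the new display buffer.
 */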
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

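/* Map the object's pages into kernel virtual address space for PRIME. */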
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/*
	 * Please see the comment in xen_drm_front_gem_object_mmap() on
	 * mapping and attributes.
	 */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

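/* Undo xen_drm_front_gem_prime_vmap(). */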
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}
| 307 | |