// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_cursor_plane.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"

#include <drm/drm_dumb_buffers.h>
#include <drm/ttm/ttm_placement.h>

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))

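/*
 * Example (illustrative): SVGA3D_FLAGS_64(0x1, 0x2) yields 0x100000002ULL,
 * and SVGA3D_FLAGS_UPPER_32()/SVGA3D_FLAGS_LOWER_32() recover the two
 * halves. User-space passes the 64-bit surface flags as two 32-bit words;
 * see vmw_gb_surface_define_internal() and
 * vmw_gb_surface_reference_internal() below.
 */
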
22
23/**
24 * struct vmw_user_surface - User-space visible surface resource
25 *
26 * @prime: The TTM prime object.
27 * @srf: The surface metadata.
28 * @master: Master of the creating client. Used for security check.
29 */
30struct vmw_user_surface {
31 struct ttm_prime_object prime;
32 struct vmw_surface srf;
33 struct drm_master *master;
34};
35
36/**
37 * struct vmw_surface_offset - Backing store mip level offset info
38 *
39 * @face: Surface face.
40 * @mip: Mip level.
41 * @bo_offset: Offset into backing store of this mip level.
42 *
43 */
44struct vmw_surface_offset {
45 uint32_t face;
46 uint32_t mip;
47 uint32_t bo_offset;
48};
49
50/**
51 * struct vmw_surface_dirty - Surface dirty-tracker
52 * @cache: Cached layout information of the surface.
53 * @num_subres: Number of subresources.
54 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
55 */
56struct vmw_surface_dirty {
57 struct vmw_surface_cache cache;
58 u32 num_subres;
59 SVGA3dBox boxes[] __counted_by(num_subres);
60};
61
62static void vmw_user_surface_free(struct vmw_resource *res);
63static struct vmw_resource *
64vmw_user_surface_base_to_res(struct ttm_base_object *base);
65static int vmw_legacy_srf_bind(struct vmw_resource *res,
66 struct ttm_validate_buffer *val_buf);
67static int vmw_legacy_srf_unbind(struct vmw_resource *res,
68 bool readback,
69 struct ttm_validate_buffer *val_buf);
70static int vmw_legacy_srf_create(struct vmw_resource *res);
71static int vmw_legacy_srf_destroy(struct vmw_resource *res);
72static int vmw_gb_surface_create(struct vmw_resource *res);
73static int vmw_gb_surface_bind(struct vmw_resource *res,
74 struct ttm_validate_buffer *val_buf);
75static int vmw_gb_surface_unbind(struct vmw_resource *res,
76 bool readback,
77 struct ttm_validate_buffer *val_buf);
78static int vmw_gb_surface_destroy(struct vmw_resource *res);
79static int
80vmw_gb_surface_define_internal(struct drm_device *dev,
81 struct drm_vmw_gb_surface_create_ext_req *req,
82 struct drm_vmw_gb_surface_create_rep *rep,
83 struct drm_file *file_priv);
84static int
85vmw_gb_surface_reference_internal(struct drm_device *dev,
86 struct drm_vmw_surface_arg *req,
87 struct drm_vmw_gb_surface_ref_ext_rep *rep,
88 struct drm_file *file_priv);
89
90static void vmw_surface_dirty_free(struct vmw_resource *res);
91static int vmw_surface_dirty_alloc(struct vmw_resource *res);
92static int vmw_surface_dirty_sync(struct vmw_resource *res);
93static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
94 size_t end);
95static int vmw_surface_clean(struct vmw_resource *res);
96
97static const struct vmw_user_resource_conv user_surface_conv = {
98 .object_type = VMW_RES_SURFACE,
99 .base_obj_to_res = vmw_user_surface_base_to_res,
100 .res_free = vmw_user_surface_free
101};
102
103const struct vmw_user_resource_conv *user_surface_converter =
104 &user_surface_conv;
105
106static const struct vmw_res_func vmw_legacy_surface_func = {
107 .res_type = vmw_res_surface,
108 .needs_guest_memory = false,
109 .may_evict = true,
110 .prio = 1,
111 .dirty_prio = 1,
112 .type_name = "legacy surfaces",
113 .domain = VMW_BO_DOMAIN_GMR,
114 .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
115 .create = &vmw_legacy_srf_create,
116 .destroy = &vmw_legacy_srf_destroy,
117 .bind = &vmw_legacy_srf_bind,
118 .unbind = &vmw_legacy_srf_unbind
119};
120
121static const struct vmw_res_func vmw_gb_surface_func = {
122 .res_type = vmw_res_surface,
123 .needs_guest_memory = true,
124 .may_evict = true,
125 .prio = 1,
126 .dirty_prio = 2,
127 .type_name = "guest backed surfaces",
128 .domain = VMW_BO_DOMAIN_MOB,
129 .busy_domain = VMW_BO_DOMAIN_MOB,
130 .create = vmw_gb_surface_create,
131 .destroy = vmw_gb_surface_destroy,
132 .bind = vmw_gb_surface_bind,
133 .unbind = vmw_gb_surface_unbind,
134 .dirty_alloc = vmw_surface_dirty_alloc,
135 .dirty_free = vmw_surface_dirty_free,
136 .dirty_sync = vmw_surface_dirty_sync,
137 .dirty_range_add = vmw_surface_dirty_range_add,
138 .clean = vmw_surface_clean,
139};
140
141/*
142 * struct vmw_surface_dma - SVGA3D DMA command
143 */
144struct vmw_surface_dma {
145 SVGA3dCmdHeader header;
146 SVGA3dCmdSurfaceDMA body;
147 SVGA3dCopyBox cb;
148 SVGA3dCmdSurfaceDMASuffix suffix;
149};
150
151/*
152 * struct vmw_surface_define - SVGA3D Surface Define command
153 */
154struct vmw_surface_define {
155 SVGA3dCmdHeader header;
156 SVGA3dCmdDefineSurface body;
157};
158
159/*
160 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
161 */
162struct vmw_surface_destroy {
163 SVGA3dCmdHeader header;
164 SVGA3dCmdDestroySurface body;
165};
166
167
168/**
169 * vmw_surface_dma_size - Compute fifo size for a dma command.
170 *
171 * @srf: Pointer to a struct vmw_surface
172 *
173 * Computes the required size for a surface dma command for backup or
174 * restoration of the surface represented by @srf.
175 */
176static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
177{
178 return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
179}
180
181
182/**
183 * vmw_surface_define_size - Compute fifo size for a surface define command.
184 *
185 * @srf: Pointer to a struct vmw_surface
186 *
187 * Computes the required size for a surface define command for the definition
188 * of the surface represented by @srf.
189 */
190static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
191{
192 return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
193 sizeof(SVGA3dSize);
194}
195
196
197/**
198 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
199 *
200 * Computes the required size for a surface destroy command for the destruction
201 * of a hw surface.
202 */
203static inline uint32_t vmw_surface_destroy_size(void)
204{
205 return sizeof(struct vmw_surface_destroy);
206}
207
208/**
209 * vmw_surface_destroy_encode - Encode a surface_destroy command.
210 *
211 * @id: The surface id
212 * @cmd_space: Pointer to memory area in which the commands should be encoded.
213 */
214static void vmw_surface_destroy_encode(uint32_t id,
215 void *cmd_space)
216{
217 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
218 cmd_space;
219
220 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
221 cmd->header.size = sizeof(cmd->body);
222 cmd->body.sid = id;
223}
224
225/**
226 * vmw_surface_define_encode - Encode a surface_define command.
227 *
228 * @srf: Pointer to a struct vmw_surface object.
229 * @cmd_space: Pointer to memory area in which the commands should be encoded.
230 */
231static void vmw_surface_define_encode(const struct vmw_surface *srf,
232 void *cmd_space)
233{
234 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
235 cmd_space;
236 struct drm_vmw_size *src_size;
237 SVGA3dSize *cmd_size;
238 uint32_t cmd_len;
239 int i;
240
241 cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
242 sizeof(SVGA3dSize);
243
244 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
245 cmd->header.size = cmd_len;
246 cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags: the driver internally stores the flags
	 * as 64 bit, upcast from the 32-bit value received from user-space,
	 * since the legacy surface define command only supports 32-bit flags.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
	cmd->body.format = srf->metadata.format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->metadata.sizes;

	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(srf->metadata.format);

	for (i = 0; i < srf->metadata.num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			vmw_surface_get_image_buffer_size(desc, cur_size,
							  body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts the resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
		/*
		 * TODO: Consider making used_memory_size atomic, or using a
		 * separate lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->guest_memory_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * Called when the surface doesn't yet have a hw id; does nothing if the
 * surface has already been created.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->guest_memory_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->guest_memory_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Readback - only true if dirty
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->guest_memory_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);

	WARN_ON(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->metadata.sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;

	/*
	 * Dumb buffers own the resource and they'll unref the
	 * resource themselves
	 */
	WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);

	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_surface_metadata *metadata;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
		(union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	const SVGA3dSurfaceDesc *desc;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	desc = vmw_surface_get_desc(req->format);
	if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	srf = &user_srf->srf;
	metadata = &srf->metadata;
	res = &srf->res;

	/* Driver internally stores as 64-bit flags */
	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
	metadata->format = req->format;
	metadata->scanout = req->scanout;

	memcpy(metadata->mip_levels, req->mip_levels,
	       sizeof(metadata->mip_levels));
	metadata->num_sizes = num_sizes;
	metadata->sizes =
		memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
				  req->size_addr,
				  metadata->num_sizes, sizeof(*metadata->sizes));
	if (IS_ERR(metadata->sizes)) {
		ret = PTR_ERR(metadata->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	metadata->base_size = *srf->metadata.sizes;
	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	metadata->multisample_count = 0;
	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = metadata->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < metadata->mip_levels[i]; ++j) {
			uint32_t stride = vmw_surface_calculate_pitch(
						  desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += vmw_surface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->guest_memory_size = cur_bo_offset;

	srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata);
	if (IS_ERR(srf->snooper.image)) {
		ret = PTR_ERR(srf->snooper.image);
		goto out_no_copy;
	}

	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a surface will expect a backup
	 * buffer to be present.
	 */
	if (dev_priv->has_mob) {
		struct vmw_bo_params params = {
			.domain = VMW_BO_DOMAIN_SYS,
			.busy_domain = VMW_BO_DOMAIN_SYS,
			.bo_type = ttm_bo_type_device,
			.size = res->guest_memory_size,
			.pin = false
		};

		ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}

		ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->guest_memory_size,
				    &user_srf->prime,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(metadata->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_unlock:
	return ret;
}

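/**
 * vmw_lookup_user_surface_for_buffer - Look up the user surface backed by
 * a buffer object.
 *
 * @vmw: Pointer to the device private struct.
 * @bo: The buffer object whose associated surface to look up.
 * @handle: The user-space handle, used only for debug logging.
 *
 * Returns a pointer to the struct vmw_user_surface with a reference held
 * on its TTM base object, or NULL if the buffer has no surface or the
 * surface handle is stale.
 */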
static struct vmw_user_surface *
vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
				   u32 handle)
{
	struct vmw_user_surface *user_srf = NULL;
	struct vmw_surface *surf;
	struct ttm_base_object *base;

	surf = vmw_bo_surface(bo);
	if (surf) {
		rcu_read_lock();
		user_srf = container_of(surf, struct vmw_user_surface, srf);
		base = &user_srf->prime.base;
		if (base && !kref_get_unless_zero(&base->refcount)) {
			drm_dbg_driver(&vmw->drm,
				       "%s: referencing a stale surface handle %d\n",
				       __func__, handle);
			base = NULL;
			user_srf = NULL;
		}
		rcu_read_unlock();
	}

	return user_srf;
}

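/**
 * vmw_lookup_surface_for_buffer - Return a referenced surface backed by
 * a buffer object.
 *
 * @vmw: Pointer to the device private struct.
 * @bo: The buffer object whose associated surface to look up.
 * @handle: The user-space handle, used only for debug logging.
 *
 * Returns a referenced struct vmw_surface, or NULL. The temporary base
 * object reference taken during the lookup is dropped before returning.
 */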
struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
						  struct vmw_bo *bo,
						  u32 handle)
{
	struct vmw_user_surface *user_srf =
		vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
	struct vmw_surface *surf = NULL;
	struct ttm_base_object *base;

	if (user_srf) {
		surf = vmw_surface_reference(&user_srf->srf);
		base = &user_srf->prime.base;
		ttm_base_object_unref(&base);
	}
	return surf;
}

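/**
 * vmw_lookup_surface_handle_for_buffer - Return the base object handle of
 * the surface backed by a buffer object.
 *
 * @vmw: Pointer to the device private struct.
 * @bo: The buffer object whose associated surface to look up.
 * @handle: The user-space handle, used only for debug logging.
 *
 * Returns the surface's base object handle, or 0 if the buffer has no
 * associated surface.
 */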
u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
					 struct vmw_bo *bo,
					 u32 handle)
{
	struct vmw_user_surface *user_srf =
		vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
	int surf_handle = 0;
	struct ttm_base_object *base;

	if (user_srf) {
		base = &user_srf->prime.base;
		surf_handle = (u32)base->handle;
		ttm_base_object_unref(&base);
	}
	return surf_handle;
}

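/**
 * vmw_buffer_prime_to_surface_base - Convert a prime fd to the TTM base
 * object of the surface backing the imported buffer.
 *
 * @dev_priv: Pointer to the device private struct.
 * @file_priv: Pointer to a drm file private structure.
 * @fd: The prime file descriptor to import.
 * @handle: On success, the GEM handle the fd was resolved to.
 * @base_p: On success, the referenced TTM base object of the surface.
 *
 * Used as a fallback when a prime handle does not resolve to a surface
 * directly but to a buffer object that carries one.
 */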
static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
					    struct drm_file *file_priv,
					    u32 fd, u32 *handle,
					    struct ttm_base_object **base_p)
{
	struct ttm_base_object *base;
	struct vmw_bo *bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	int ret;

	ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Wasn't able to find user buffer for fd = %u.\n", fd);
		return ret;
	}

	ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
		return ret;
	}

	user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
	if (WARN_ON(!user_srf)) {
		drm_warn(&dev_priv->drm,
			 "User surface fd %d (handle %d) is null.\n", fd, *handle);
		ret = -EINVAL;
		goto out;
	}

	base = &user_srf->prime.base;
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	if (ret) {
		drm_warn(&dev_priv->drm,
			 "Couldn't add an object ref for the buffer (%d).\n", *handle);
		goto out;
	}

	*base_p = base;
out:
	vmw_user_bo_unref(&bo);

	return ret;
}

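/**
 * vmw_surface_handle_reference - Reference a surface from a user-space
 * handle or prime fd.
 *
 * @dev_priv: Pointer to the device private struct.
 * @file_priv: Pointer to a drm file private structure.
 * @u_handle: The handle or prime fd supplied by user-space.
 * @handle_type: Whether @u_handle is a plain handle or a prime fd.
 * @base_p: On success, the referenced TTM base object of the surface.
 *
 * For prime fds that do not resolve to a surface handle, falls back to
 * looking up a surface backing the imported buffer object. For plain
 * handles, access is checked against the client's master and
 * authentication state.
 */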
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf = NULL;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (ret)
			return vmw_buffer_prime_to_surface_base(dev_priv,
								file_priv,
								u_handle,
								&handle,
								base_p);
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}
	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/* Error out if we are unauthenticated primary */
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
		(union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->metadata.flags;
	rep->format = srf->metadata.format;
	memcpy(rep->mip_levels, srf->metadata.mip_levels,
	       sizeof(srf->metadata.mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
		rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
				   sizeof(srf->metadata.base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->metadata.num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface as part of
 * the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_surface_metadata *metadata = &srf->metadata;
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v4 body;
	} *cmd4;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

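	/*
	 * Pick the most capable DEFINE_GB_SURFACE variant the device
	 * supports: v4 on SM5 adds the buffer byte stride, v3 on SM4.1
	 * adds the multisample pattern and quality level, v2 adds the
	 * array size, and the base command covers everything else.
	 */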
	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
		cmd_len = sizeof(cmd4->body);
		submit_len = sizeof(*cmd4);
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (metadata->array_size > 0) {
		/* VMW_SM_4 support verified at creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	cmd4 = (typeof(cmd4))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd4->header.id = cmd_id;
		cmd4->header.size = cmd_len;
		cmd4->body.sid = srf->res.id;
		cmd4->body.surfaceFlags = metadata->flags;
		cmd4->body.format = metadata->format;
		cmd4->body.numMipLevels = metadata->mip_levels[0];
		cmd4->body.multisampleCount = metadata->multisample_count;
		cmd4->body.multisamplePattern = metadata->multisample_pattern;
		cmd4->body.qualityLevel = metadata->quality_level;
		cmd4->body.autogenFilter = metadata->autogen_filter;
		cmd4->body.size.width = metadata->base_size.width;
		cmd4->body.size.height = metadata->base_size.height;
		cmd4->body.size.depth = metadata->base_size.depth;
		cmd4->body.arraySize = metadata->array_size;
		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = metadata->flags;
		cmd3->body.format = metadata->format;
		cmd3->body.numMipLevels = metadata->mip_levels[0];
		cmd3->body.multisampleCount = metadata->multisample_count;
		cmd3->body.multisamplePattern = metadata->multisample_pattern;
		cmd3->body.qualityLevel = metadata->quality_level;
		cmd3->body.autogenFilter = metadata->autogen_filter;
		cmd3->body.size.width = metadata->base_size.width;
		cmd3->body.size.height = metadata->base_size.height;
		cmd3->body.size.depth = metadata->base_size.depth;
		cmd3->body.arraySize = metadata->array_size;
	} else if (metadata->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = metadata->flags;
		cmd2->body.format = metadata->format;
		cmd2->body.numMipLevels = metadata->mip_levels[0];
		cmd2->body.multisampleCount = metadata->multisample_count;
		cmd2->body.autogenFilter = metadata->autogen_filter;
		cmd2->body.size.width = metadata->base_size.width;
		cmd2->body.size.height = metadata->base_size.height;
		cmd2->body.size.depth = metadata->base_size.depth;
		cmd2->body.arraySize = metadata->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = metadata->flags;
		cmd->body.format = metadata->format;
		cmd->body.numMipLevels = metadata->mip_levels[0];
		cmd->body.multisampleCount = metadata->multisample_count;
		cmd->body.autogenFilter = metadata->autogen_filter;
		cmd->body.size.width = metadata->base_size.width;
		cmd->body.size.height = metadata->base_size.height;
		cmd->body.size.depth = metadata->base_size.depth;
	}

	vmw_cmd_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}

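/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its backing MOB
 * as part of the validation process, flushing dirty contents with an
 * UPDATE_GB_SURFACE command when needed.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backing MOB.
 */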
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->resource->start;
	if (res->guest_memory_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_cmd_commit(dev_priv, submit_size);

	if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
		vmw_bo_dirty_clear_res(res);
	}

	res->guest_memory_dirty = false;

	return 0;
}

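/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its MOB,
 * optionally reading its contents back to the backing buffer first.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Whether to issue a READBACK_GB_SURFACE before unbinding;
 * otherwise the surface contents are invalidated.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backing MOB, which is fenced on completion.
 */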
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

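/**
 * vmw_gb_surface_destroy - Destroy a guest-backed device surface,
 * scrubbing its bindings and destroying its views before issuing the
 * DESTROY_GB_SURFACE command.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */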
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
		(union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.buffer_byte_stride = 0;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
		(union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
		(union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
		(union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_define_internal - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Request argument from user-space.
 * @rep: Response argument to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata metadata = {0};
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	int ret = 0;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);

	/* array_size must be zero for non-GL3 hosts. */
	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
		VMW_DEBUG_USER("SM4 surface not supported.\n");
		return -EINVAL;
	}

	if (!has_sm4_1_context(dev_priv)) {
		if (req->svga3d_flags_upper_32_bits != 0)
			ret = -EINVAL;

		if (req->base.multisample_count != 0)
			ret = -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			ret = -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			ret = -EINVAL;

		if (ret) {
			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
			return ret;
		}
	}

	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
		VMW_DEBUG_USER("SM5 surface not supported.\n");
		return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0) {
		VMW_DEBUG_USER("Invalid sample count.\n");
		return -EINVAL;
	}

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
		VMW_DEBUG_USER("Invalid mip level.\n");
		return -EINVAL;
	}

	metadata.flags = svga3d_flags_64;
	metadata.format = req->base.format;
	metadata.mip_levels[0] = req->base.mip_levels;
	metadata.multisample_count = req->base.multisample_count;
	metadata.multisample_pattern = req->multisample_pattern;
	metadata.quality_level = req->quality_level;
	metadata.array_size = req->base.array_size;
	metadata.buffer_byte_stride = req->buffer_byte_stride;
	metadata.num_sizes = 1;
	metadata.base_size = req->base.base_size;
	metadata.scanout = req->base.drm_surface_flags &
		drm_vmw_surface_flag_scanout;

	/* Define a surface based on the parameters. */
	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
	if (ret != 0) {
		VMW_DEBUG_USER("Failed to define surface.\n");
		return ret;
	}

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
					 &res->guest_memory_bo);
		if (ret == 0) {
			if (res->guest_memory_bo->is_dumb) {
				VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
				vmw_user_bo_unref(&res->guest_memory_bo);
				ret = -EINVAL;
				goto out_unlock;
			} else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_user_bo_unref(&res->guest_memory_bo);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent)) {
		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							res->guest_memory_size,
							&backup_handle,
							&res->guest_memory_bo);
	}

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_bo *backup = res->guest_memory_bo;

		ttm_bo_reserve(&backup->tbo, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->tbo);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}

	}

	if (res->guest_memory_bo) {
		ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.handle;
	rep->backup_size = res->guest_memory_size;
	if (res->guest_memory_bo) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
		rep->buffer_size = res->guest_memory_bo->tbo.base.size;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}
	vmw_resource_unreference(&res);

out_unlock:
	return ret;
}

/**
 * vmw_gb_surface_reference_internal - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Pointer to user-space request surface arg.
 * @rep: Pointer to response to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata *metadata;
	struct ttm_base_object *base;
	u32 backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.guest_memory_bo) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		goto out_bad_resource;
	}
	metadata = &srf->metadata;

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->guest_memory_bo */
	ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
				    &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	if (ret != 0) {
		drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
			req->sid);
		goto out_bad_resource;
	}

	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
	rep->creq.base.format = metadata->format;
	rep->creq.base.mip_levels = metadata->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = metadata->multisample_count;
	rep->creq.base.autogen_filter = metadata->autogen_filter;
	rep->creq.base.array_size = metadata->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = metadata->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.guest_memory_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
	rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(metadata->flags);
	rep->creq.multisample_pattern = metadata->multisample_pattern;
	rep->creq.quality_level = metadata->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
 * @loc_start: The location corresponding to the start of the region.
 * @loc_end: The location corresponding to the end of the region.
 *
 * As we are assuming that @loc_start and @loc_end represent a sequential
 * range of backing store memory, if the region spans multiple lines then
 * regardless of the x coordinate, the full lines are dirtied.
 * Correspondingly if the region spans multiple z slices, then full rather
 * than partial z slices are dirtied.
 */
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
				 const struct vmw_surface_loc *loc_start,
				 const struct vmw_surface_loc *loc_end)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2 = box->z + box->d;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box->d = loc_end->z - box->z;

	if (loc_start->z + 1 == loc_end->z) {
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box->h = loc_end->y - box->y;

		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box->w = loc_end->x - box->x;
		} else {
			box->x = 0;
			box->w = size->width;
		}
	} else {
		box->y = 0;
		box->h = size->height;
		box->x = 0;
		box->w = size->width;
	}
}

/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource
 */
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}

1830/*
1831 * vmw_surface_tex_dirty_add_range - The dirty_add_range callback for texture
1832 * surfaces.
1833 */
1834static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
1835 size_t start, size_t end)
1836{
1837 struct vmw_surface_dirty *dirty =
1838 (struct vmw_surface_dirty *) res->dirty;
1839 size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
1840 struct vmw_surface_loc loc1, loc2;
1841 const struct vmw_surface_cache *cache;
1842
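	/*
	 * Clamp the range to the part backed by this resource and make the
	 * offsets relative to the start of the backing store.
	 */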
	start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
	end = min(end, backup_end) - res->guest_memory_offset;
	cache = &dirty->cache;
	vmw_surface_get_loc(cache, &loc1, start);
	vmw_surface_get_loc(cache, &loc2, end - 1);
	vmw_surface_inc_loc(cache, &loc2);

	if (loc1.sheet != loc2.sheet) {
		u32 sub_res;

		/*
		 * Multiple multisample sheets. To do this in an optimized
		 * fashion, compute the dirty region for each sheet and the
		 * resulting union. Since this is not a common case, just dirty
		 * the whole surface.
		 */
		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
		return;
	}
	if (loc1.sub_resource + 1 == loc2.sub_resource) {
		/* Dirty range covers a single sub-resource */
		vmw_subres_dirty_add(dirty, &loc1, &loc2);
	} else {
		/* Dirty range covers multiple sub-resources */
		struct vmw_surface_loc loc_min, loc_max;
		u32 sub_res;

		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
		for (sub_res = loc1.sub_resource + 1;
		     sub_res < loc2.sub_resource - 1; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
	}
}

/*
 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
 * surfaces.
 */
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	const struct vmw_surface_cache *cache = &dirty->cache;
	size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
	SVGA3dBox *box = &dirty->boxes[0];
	u32 box_c2;

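	/*
	 * A buffer surface has a single subresource; track the dirty span as
	 * a one-dimensional box with x and w expressed in bytes.
	 */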
	box->h = box->d = 1;
	start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
	end = min(end, backup_end) - res->guest_memory_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box->w = end - box->x;
}

/*
 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces.
 */
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);

	if (WARN_ON(end <= res->guest_memory_offset ||
		    start >= res->guest_memory_offset + res->guest_memory_size))
		return;

	if (srf->metadata.format == SVGA3D_BUFFER)
		vmw_surface_buf_dirty_range_add(res, start, end);
	else
		vmw_surface_tex_dirty_range_add(res, start, end);
}

/*
 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
 */
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	u32 i, num_dirty;
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t alloc_size;
	const struct vmw_surface_cache *cache = &dirty->cache;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXUpdateSubResource body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd;

	num_dirty = 0;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (box->d)
			num_dirty++;
	}

	if (!num_dirty)
		goto out;

	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

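	/*
	 * cmd1 and cmd2 alias the same reserved command buffer; only the
	 * variant matching the device context is advanced in the loop below.
	 */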
	cmd1 = cmd;
	cmd2 = cmd;

	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (!box->d)
			continue;

		/*
		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
		 * UPDATE_GB_IMAGE is not.
		 */
		if (has_sm4_context(dev_priv)) {
			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
			cmd1->header.size = sizeof(cmd1->body);
			cmd1->body.sid = res->id;
			cmd1->body.subResource = i;
			cmd1->body.box = *box;
			cmd1++;
		} else {
			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
			cmd2->header.size = sizeof(cmd2->body);
			cmd2->body.image.sid = res->id;
			cmd2->body.image.face = i / cache->num_mip_levels;
			cmd2->body.image.mipmap = i -
				(cache->num_mip_levels * cmd2->body.image.face);
			cmd2->body.box = *box;
			cmd2++;
		}

	}
	vmw_cmd_commit(dev_priv, alloc_size);
out:
	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
	       dirty->num_subres);

	return 0;
}

/*
 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
 */
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	const struct vmw_surface_metadata *metadata = &srf->metadata;
	struct vmw_surface_dirty *dirty;
	u32 num_layers = 1;
	u32 num_mip;
	u32 num_subres;
	u32 num_samples;
	size_t dirty_size;
	int ret;

	if (metadata->array_size)
		num_layers = metadata->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers *= SVGA3D_MAX_SURFACE_FACES;

	num_mip = metadata->mip_levels[0];
	if (!num_mip)
		num_mip = 1;

	num_subres = num_layers * num_mip;
	dirty_size = struct_size(dirty, boxes, num_subres);

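	/* One SVGA3dBox per subresource; the array can be large, hence kvzalloc(). */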
	dirty = kvzalloc(dirty_size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	num_samples = max_t(u32, 1, metadata->multisample_count);
	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
				      num_mip, num_layers, num_samples,
				      &dirty->cache);
	if (ret)
		goto out_no_cache;

	dirty->num_subres = num_subres;
	res->dirty = (struct vmw_resource_dirty *) dirty;

	return 0;

out_no_cache:
	kvfree(dirty);
out_no_dirty:
	return ret;
}

/*
 * vmw_surface_dirty_free - The surface's dirty_free callback.
 */
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;

	kvfree(dirty);
	res->dirty = NULL;
}

/*
 * vmw_surface_clean - The surface's clean callback.
 */
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

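	/*
	 * Issue a readback so that any host-side changes to the surface are
	 * written back to the guest backing store.
	 */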
	alloc_size = sizeof(*cmd);
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, alloc_size);

	return 0;
}

/**
 * vmw_gb_surface_define - Define a private GB surface
 *
 * @dev_priv: Pointer to a device private.
 * @req: Metadata representing the surface to create.
 * @srf_out: The allocated surface. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx. For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
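 *
 * A minimal usage sketch (hypothetical values, error handling omitted;
 * note that the function requires num_sizes == 1 and sizes == NULL):
 *
 *	struct vmw_surface_metadata metadata = {
 *		.format = SVGA3D_B8G8R8X8_UNORM,
 *		.base_size = { .width = 64, .height = 64, .depth = 1 },
 *		.mip_levels = { 1 },
 *		.num_sizes = 1,
 *		.autogen_filter = SVGA3D_TEX_FILTER_NONE,
 *	};
 *	struct vmw_surface *srf;
 *	int ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);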
 */
int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata *metadata;
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	u32 sample_count = 1;
	u32 num_layers = 1;

	*srf_out = NULL;

	if (req->scanout) {
		if (!vmw_surface_is_screen_target_format(req->format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.\n");
			return -EINVAL;
		}

		if (req->base_size.width > dev_priv->texture_max_width ||
		    req->base_size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u exceeds max surface size %ux%u\n",
				       req->base_size.width,
				       req->base_size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(req->format);

		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
		return -EINVAL;

	if (req->num_sizes != 1)
		return -EINVAL;

	if (req->sizes != NULL)
		return -EINVAL;

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf))
		return -ENOMEM;

	*srf_out = &user_srf->srf;

	srf = &user_srf->srf;
	srf->metadata = *req;
	srf->offsets = NULL;

	metadata = &srf->metadata;

	if (metadata->array_size)
		num_layers = req->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = metadata->multisample_count;

	srf->res.guest_memory_size =
		vmw_surface_get_serialized_size_extended(
				metadata->format,
				metadata->base_size,
				metadata->mip_levels[0],
				num_layers,
				sample_count);

	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);

	/*
	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
	 * with a size greater than the STDU max width/height. This is really a
	 * workaround to support creation of a big framebuffer requested by
	 * some user-space for the whole topology. That big framebuffer won't
	 * really be used for binding with a screen target, as during
	 * prepare_fb a separate surface is created, so it's safe to ignore
	 * the SVGA3D_SURFACE_SCREENTARGET flag.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    metadata->scanout &&
	    metadata->base_size.width <= dev_priv->stdu_max_width &&
	    metadata->base_size.height <= dev_priv->stdu_max_height)
		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	return vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
}

static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
						  int bpp)
{
	switch (bpp) {
	case 8: /* DRM_FORMAT_C8 */
		return SVGA3D_P8;
	case 16: /* DRM_FORMAT_RGB565 */
		return SVGA3D_R5G6B5;
	case 32: /* DRM_FORMAT_XRGB8888 */
		if (has_sm4_context(vmw))
			return SVGA3D_B8G8R8X8_UNORM;
		return SVGA3D_X8R8G8B8;
	default:
		drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
		return SVGA3D_X8R8G8B8;
	}
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_bo *vbo = NULL;
	struct vmw_resource *res = NULL;
	union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
	struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
	int ret;
	struct drm_vmw_size drm_size = {
		.width = args->width,
		.height = args->height,
		.depth = 1,
	};
	SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
	const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
	SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
				      SVGA3D_SURFACE_HINT_RENDERTARGET |
				      SVGA3D_SURFACE_SCREENTARGET;

	if (vmw_surface_is_dx_screen_target_format(format)) {
		flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
			 SVGA3D_SURFACE_BIND_RENDER_TARGET;
	}

	/*
	 * Without mob support we're just going to use a raw memory buffer
	 * because we wouldn't be able to support full surface coherency
	 * without mobs. There is also no reason to support surface coherency
	 * without 3d (i.e. gpu usage on the host) because then all the
	 * contents are going to be rendered guest side.
	 */
	if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
		ret = drm_mode_size_dumb(dev, args, 0, 0);
		if (ret)
			return ret;

		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							args->size, &args->handle,
							&vbo);
		/* drop reference from allocate - handle holds it now */
		drm_gem_object_put(&vbo->tbo.base);
		return ret;
	}

	req->version = drm_vmw_gb_surface_v1;
	req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req->quality_level = SVGA3D_MS_QUALITY_NONE;
	req->buffer_byte_stride = 0;
	req->must_be_zero = 0;
	req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
	req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
	req->base.format = (uint32_t)format;
	req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
	req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
	req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
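	/*
	 * The coherent flag wires the surface into the dirty tracking
	 * implemented above (vmw_surface_dirty_*), keeping the guest backing
	 * store in sync with the host surface.
	 */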
	req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
	req->base.base_size.width = args->width;
	req->base.base_size.height = args->height;
	req->base.base_size.depth = 1;
	req->base.array_size = 0;
	req->base.mip_levels = 1;
	req->base.multisample_count = 0;
	req->base.buffer_handle = SVGA3D_INVALID_ID;
	req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
	ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
	if (ret) {
		drm_warn(dev, "Unable to create a dumb buffer\n");
		return ret;
	}

	args->handle = arg.rep.buffer_handle;
	args->size = arg.rep.buffer_size;
	args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
					      user_surface_converter,
					      &res);
	if (ret) {
		drm_err(dev, "Created resource handle doesn't exist!\n");
		goto err;
	}

	vbo = res->guest_memory_bo;
	vbo->is_dumb = true;
	vbo->dumb_surface = vmw_res_to_srf(res);
	drm_gem_object_put(&vbo->tbo.base);
	/*
	 * Unset the user surface dtor since this is not actually exposed
	 * to userspace. The surface is owned via the dumb buffer's GEM handle.
	 */
	struct vmw_user_surface *usurf = container_of(vbo->dumb_surface,
						      struct vmw_user_surface, srf);
	usurf->prime.base.refcount_release = NULL;
err:
	if (res)
		vmw_resource_unreference(&res);

	ttm_ref_object_base_unref(tfile, arg.rep.handle);

	return ret;
}