1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2023 Intel Corporation
4 */
5
6#ifndef _XE_TLB_INVAL_TYPES_H_
7#define _XE_TLB_INVAL_TYPES_H_
8
9#include <linux/workqueue.h>
10#include <linux/dma-fence.h>
11
12struct xe_tlb_inval;
13
/** struct xe_tlb_inval_ops - TLB invalidation ops (backend) */
struct xe_tlb_inval_ops {
	/**
	 * @all: Invalidate all TLBs
	 * @tlb_inval: TLB invalidation client
	 * @seqno: Seqno of TLB invalidation
	 *
	 * Return: 0 on success, -ECANCELED if backend is mid-reset, error on
	 * failure
	 */
	int (*all)(struct xe_tlb_inval *tlb_inval, u32 seqno);

	/**
	 * @ggtt: Invalidate global translation TLBs
	 * @tlb_inval: TLB invalidation client
	 * @seqno: Seqno of TLB invalidation
	 *
	 * Return: 0 on success, -ECANCELED if backend is mid-reset, error on
	 * failure
	 */
	int (*ggtt)(struct xe_tlb_inval *tlb_inval, u32 seqno);

	/**
	 * @ppgtt: Invalidate per-process translation TLBs
	 * @tlb_inval: TLB invalidation client
	 * @seqno: Seqno of TLB invalidation
	 * @start: Start address
	 * @end: End address
	 * @asid: Address space ID
	 *
	 * Return: 0 on success, -ECANCELED if backend is mid-reset, error on
	 * failure
	 */
	int (*ppgtt)(struct xe_tlb_inval *tlb_inval, u32 seqno, u64 start,
		     u64 end, u32 asid);

	/**
	 * @initialized: Backend is initialized
	 * @tlb_inval: TLB invalidation client
	 *
	 * Return: True if the backend is initialized, False otherwise
	 */
	bool (*initialized)(struct xe_tlb_inval *tlb_inval);

	/**
	 * @flush: Flush pending TLB invalidations
	 * @tlb_inval: TLB invalidation client
	 */
	void (*flush)(struct xe_tlb_inval *tlb_inval);

	/**
	 * @timeout_delay: Timeout delay for TLB invalidation
	 * @tlb_inval: TLB invalidation client
	 *
	 * Return: Timeout delay for TLB invalidation in jiffies
	 */
	long (*timeout_delay)(struct xe_tlb_inval *tlb_inval);
};
72
/** struct xe_tlb_inval - TLB invalidation client (frontend) */
struct xe_tlb_inval {
	/** @private: Backend private pointer */
	void *private;
	/** @xe: Pointer to Xe device */
	struct xe_device *xe;
	/** @ops: TLB invalidation ops */
	const struct xe_tlb_inval_ops *ops;
	/** @seqno: TLB invalidation seqno, protected by @seqno_lock */
#define TLB_INVALIDATION_SEQNO_MAX 0x100000
	int seqno;
	/** @seqno_lock: protects @seqno */
	struct mutex seqno_lock;
	/**
	 * @seqno_recv: last received TLB invalidation seqno, protected by
	 * @pending_lock
	 */
	int seqno_recv;
	/**
	 * @pending_fences: list of pending fences waiting TLB invalidations,
	 * protected by @pending_lock
	 */
	struct list_head pending_fences;
	/**
	 * @pending_lock: protects @pending_fences and updating @seqno_recv.
	 */
	spinlock_t pending_lock;
	/**
	 * @fence_tdr: schedules a delayed call to xe_tlb_fence_timeout after
	 * the timeout interval is over.
	 */
	struct delayed_work fence_tdr;
	/** @job_wq: schedules TLB invalidation jobs */
	struct workqueue_struct *job_wq;
	/** @lock: protects TLB invalidation fences */
	spinlock_t lock;
};
110
/**
 * struct xe_tlb_inval_fence - TLB invalidation fence
 *
 * Optionally passed to xe_tlb_inval* functions and will be signaled upon TLB
 * invalidation completion.
 */
struct xe_tlb_inval_fence {
	/** @base: dma fence base */
	struct dma_fence base;
	/** @tlb_inval: TLB invalidation client to which this fence belongs */
	struct xe_tlb_inval *tlb_inval;
	/** @link: link into list of pending tlb fences */
	struct list_head link;
	/** @seqno: seqno of the TLB invalidation upon which to signal this fence */
	int seqno;
	/** @inval_time: time of TLB invalidation */
	ktime_t inval_time;
};
129
130#endif
131

source code of linux/drivers/gpu/drm/xe/xe_tlb_inval_types.h