/* SPDX-License-Identifier: MIT */
#ifndef _DRM_PAGEMAP_H_
#define _DRM_PAGEMAP_H_

#include <linux/dma-direction.h>
#include <linux/hmm.h>
#include <linux/types.h>

#define NR_PAGES(order) (1U << (order))

struct dma_fence;
struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;

/**
 * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
 *
 * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
 * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
 */
enum drm_interconnect_protocol {
	DRM_INTERCONNECT_SYSTEM,
	DRM_INTERCONNECT_DRIVER,
	/* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
};

/**
 * struct drm_pagemap_addr - Address representation.
 * @addr: The dma address or driver-defined address for driver private interconnects.
 * @proto: The interconnect protocol.
 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction.
 *
 * Note: There is room for improvement here. We should be able to pack into
 * 64 bits.
 */
struct drm_pagemap_addr {
	dma_addr_t addr;
	u64 proto : 54;
	u64 order : 8;
	u64 dir : 2;
};

/**
 * drm_pagemap_addr_encode() - Encode a dma address with metadata
 * @addr: The dma address or driver-defined address for driver private interconnects.
 * @proto: The interconnect protocol.
 * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction.
 *
 * Return: A struct drm_pagemap_addr encoding the above information.
 */
static inline struct drm_pagemap_addr
drm_pagemap_addr_encode(dma_addr_t addr,
			enum drm_interconnect_protocol proto,
			unsigned int order,
			enum dma_data_direction dir)
{
	return (struct drm_pagemap_addr) {
		.addr = addr,
		.proto = proto,
		.order = order,
		.dir = dir,
	};
}
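
/*
 * Usage sketch (illustrative only): encode a single-page, bidirectional
 * system-memory mapping. "dma_addr" stands for a hypothetical address
 * obtained from an earlier DMA mapping call:
 *
 *	struct drm_pagemap_addr addr =
 *		drm_pagemap_addr_encode(dma_addr, DRM_INTERCONNECT_SYSTEM,
 *					0, DMA_BIDIRECTIONAL);
 */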

/**
 * struct drm_pagemap_ops - Ops for a drm-pagemap.
 */
struct drm_pagemap_ops {
	/**
	 * @device_map: Map for device access or provide a virtual address
	 * suitable for device access.
	 *
	 * @dpagemap: The struct drm_pagemap for the page.
	 * @dev: The device mapper.
	 * @page: The page to map.
	 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
	 * @dir: The transfer direction.
	 */
	struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
					      struct device *dev,
					      struct page *page,
					      unsigned int order,
					      enum dma_data_direction dir);

	/**
	 * @device_unmap: Unmap a device address previously obtained using
	 * @device_map.
	 *
	 * @dpagemap: The struct drm_pagemap for the mapping.
	 * @dev: The device unmapper.
	 * @addr: The device address obtained when mapping.
	 */
	void (*device_unmap)(struct drm_pagemap *dpagemap,
			     struct device *dev,
			     struct drm_pagemap_addr addr);

	/**
	 * @populate_mm: Populate part of the mm with @dpagemap memory,
	 * migrating existing data.
	 * @dpagemap: The struct drm_pagemap managing the memory.
	 * @start: The virtual start address in @mm.
	 * @end: The virtual end address in @mm.
	 * @mm: Pointer to a live mm. The caller must have an mmget()
	 * reference.
	 * @timeslice_ms: The time requested for the migrated data to be
	 * present in @mm before being allowed to be migrated back, to
	 * avoid migration thrashing. Best effort only.
	 *
	 * The caller will have the mm lock held at least in read mode.
	 * Note that there is no guarantee that the memory is resident
	 * after the function returns; it's best effort only.
	 * When the mm is no longer using the memory, it will be released.
	 * The struct drm_pagemap might have a mechanism in place to
	 * reclaim the memory, in which case the data will be migrated,
	 * typically to system memory.
	 * The implementation should hold sufficient runtime power
	 * references while pages are used in an address space, and
	 * should ideally guard against hardware device unbind in such a
	 * way that device pages are migrated back to system memory
	 * before the device pages are removed. The implementation should
	 * return -ENODEV after device removal.
	 *
	 * Return: 0 if successful. Negative error code on error.
	 */
	int (*populate_mm)(struct drm_pagemap *dpagemap,
			   unsigned long start, unsigned long end,
			   struct mm_struct *mm,
			   unsigned long timeslice_ms);
};
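
/*
 * A minimal sketch of @device_map / @device_unmap backed by the DMA API
 * for the DRM_INCONNECT_SYSTEM... rather, DRM_INTERCONNECT_SYSTEM case
 * (illustrative only; the function names are hypothetical, and real
 * drivers may use a private interconnect instead):
 *
 *	static struct drm_pagemap_addr
 *	my_device_map(struct drm_pagemap *dpagemap, struct device *dev,
 *		      struct page *page, unsigned int order,
 *		      enum dma_data_direction dir)
 *	{
 *		dma_addr_t addr = dma_map_page(dev, page, 0,
 *					       PAGE_SIZE << order, dir);
 *
 *		return drm_pagemap_addr_encode(addr, DRM_INTERCONNECT_SYSTEM,
 *					       order, dir);
 *	}
 *
 *	static void my_device_unmap(struct drm_pagemap *dpagemap,
 *				    struct device *dev,
 *				    struct drm_pagemap_addr addr)
 *	{
 *		dma_unmap_page(dev, addr.addr, PAGE_SIZE << addr.order,
 *			       addr.dir);
 *	}
 */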

/**
 * struct drm_pagemap - Additional information for a struct dev_pagemap
 * used for device p2p handshaking.
 * @ops: The struct drm_pagemap_ops.
 * @dev: The struct device owning the device-private memory.
 */
struct drm_pagemap {
	const struct drm_pagemap_ops *ops;
	struct device *dev;
};
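
/*
 * A driver typically embeds a struct drm_pagemap alongside the struct
 * dev_pagemap of its device-private memory region. A hypothetical sketch
 * (the struct name and members are illustrative, not part of this API):
 *
 *	struct my_vram_region {
 *		struct dev_pagemap pagemap;
 *		struct drm_pagemap dpagemap;
 *	};
 */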

struct drm_pagemap_devmem;

/**
 * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM)
 * device memory. The GPU driver provides these operations to manage device
 * memory allocations and to migrate data between device memory and system
 * RAM.
 */
struct drm_pagemap_devmem_ops {
	/**
	 * @devmem_release: Release device memory allocation (optional)
	 * @devmem_allocation: device memory allocation
	 *
	 * Release the device memory allocation and drop a reference to it.
	 */
	void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);

	/**
	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
	 * @devmem_allocation: device memory allocation
	 * @npages: Number of pages to populate
	 * @pfn: Array of page frame numbers to populate
	 *
	 * Populate device memory page frame numbers (PFN).
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn);

	/**
	 * @copy_to_devmem: Copy to device memory (required for migration)
	 * @pages: Pointer to array of device memory pages (destination)
	 * @pagemap_addr: Pointer to array of DMA information (source)
	 * @npages: Number of pages to copy
	 * @pre_migrate_fence: dma-fence to wait for before the migration
	 * starts. May be NULL.
	 *
	 * Copy pages to device memory. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_devmem)(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages,
			      struct dma_fence *pre_migrate_fence);

	/**
	 * @copy_to_ram: Copy to system RAM (required for migration)
	 * @pages: Pointer to array of device memory pages (source)
	 * @pagemap_addr: Pointer to array of DMA information (destination)
	 * @npages: Number of pages to copy
	 * @pre_migrate_fence: dma-fence to wait for before the migration
	 * starts. May be NULL.
	 *
	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_ram)(struct page **pages,
			   struct drm_pagemap_addr *pagemap_addr,
			   unsigned long npages,
			   struct dma_fence *pre_migrate_fence);
};
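
/*
 * Sketch of how a @copy_to_devmem / @copy_to_ram implementation may walk
 * @pagemap_addr while honoring higher-order entries (illustrative only;
 * the actual copy is driver-specific):
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < npages;) {
 *		struct drm_pagemap_addr addr = pagemap_addr[i];
 *
 *		... issue one copy of PAGE_SIZE << addr.order bytes
 *		    for pages[i] ...
 *
 *		i += NR_PAGES(addr.order);
 *	}
 */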

#if IS_ENABLED(CONFIG_ZONE_DEVICE)

struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);

#else

static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	return NULL;
}

#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */

/**
 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
 *
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @detached: Completion signaled when the device memory allocation is
 * detached from device pages
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
 * @size: Size of device memory allocation
 * @timeslice_expiration: Timeslice expiration in jiffies
 * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
 * (May be NULL).
 */
struct drm_pagemap_devmem {
	struct device *dev;
	struct mm_struct *mm;
	struct completion detached;
	const struct drm_pagemap_devmem_ops *ops;
	struct drm_pagemap *dpagemap;
	size_t size;
	u64 timeslice_expiration;
	struct dma_fence *pre_migrate_fence;
};

#if IS_ENABLED(CONFIG_ZONE_DEVICE)

int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  unsigned long timeslice_ms,
				  void *pgmap_owner);

int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);

const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);

void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size,
			     struct dma_fence *pre_migrate_fence);

int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms);
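
/*
 * Typical call sequence (sketch; error handling omitted, and the devmem
 * ops, pagemap and pgmap_owner are driver-specific assumptions):
 *
 *	drm_pagemap_devmem_init(&devmem, dev, mm, &my_devmem_ops,
 *				&my_dpagemap, size, NULL);
 *	err = drm_pagemap_migrate_to_devmem(&devmem, mm, start, end,
 *					    timeslice_ms, pgmap_owner);
 *
 *	... later, on memory pressure or device unbind ...
 *
 *	err = drm_pagemap_evict_to_ram(&devmem);
 */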

#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */

#endif