/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
bool huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
                    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
                              bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
                              bool write);
vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
                                bool write);
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
                                bool write);

enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_UNSUPPORTED,
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count,
                                   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON     ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
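
/*
 * Illustrative worked example (an assumption for clarity, not a definition):
 * on a configuration where PMD_ORDER == 9 (e.g. x86-64 with 4 KiB base pages),
 * THP_ORDERS_ALL_ANON evaluates to (BIT(10) - 1) & ~0x3 == 0x3fc, i.e. orders
 * 2 through 9 inclusive are permitted.
 */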

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply to
 * them. The same goes for PFNMAPs, where there is neither a struct page nor
 * a pagecache.
 */
#define THP_ORDERS_ALL_SPECIAL          \
        (BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT     \
        ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL  \
        (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)

enum tva_type {
        TVA_SMAPS,              /* Exposing "THPeligible:" in smaps. */
        TVA_PAGEFAULT,          /* Serving a page fault. */
        TVA_KHUGEPAGED,         /* Khugepaged collapse. */
        TVA_FORCED_COLLAPSE,    /* Forced collapse (e.g. MADV_COLLAPSE). */
};

#define thp_vma_allowable_order(vma, vm_flags, type, order) \
        (!!thp_vma_allowable_orders(vma, vm_flags, type, BIT(order)))

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR    (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR    (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)

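/*
 * Illustrative worked example (an assumption for clarity, not a definition):
 * with 4 KiB base pages and PMD_SHIFT == 21 (e.g. x86-64), HPAGE_PMD_SIZE is
 * 2 MiB, HPAGE_PMD_ORDER is 9 and HPAGE_PMD_NR is 512.
 */
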
enum mthp_stat_item {
        MTHP_STAT_ANON_FAULT_ALLOC,
        MTHP_STAT_ANON_FAULT_FALLBACK,
        MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
        MTHP_STAT_ZSWPOUT,
        MTHP_STAT_SWPIN,
        MTHP_STAT_SWPIN_FALLBACK,
        MTHP_STAT_SWPIN_FALLBACK_CHARGE,
        MTHP_STAT_SWPOUT,
        MTHP_STAT_SWPOUT_FALLBACK,
        MTHP_STAT_SHMEM_ALLOC,
        MTHP_STAT_SHMEM_FALLBACK,
        MTHP_STAT_SHMEM_FALLBACK_CHARGE,
        MTHP_STAT_SPLIT,
        MTHP_STAT_SPLIT_FAILED,
        MTHP_STAT_SPLIT_DEFERRED,
        MTHP_STAT_NR_ANON,
        MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
        __MTHP_STAT_COUNT
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat {
        unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
        if (order <= 0 || order > PMD_ORDER)
                return;

        this_cpu_add(mthp_stats.stats[order][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
        mod_mthp_stat(order, item, 1);
}

#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif

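/*
 * Minimal usage sketch (illustrative only; "folio" is a hypothetical caller
 * variable, not part of this header): a per-order counter is bumped like
 *
 *      count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * Orders outside (0, PMD_ORDER] are silently ignored by mod_mthp_stat().
 */
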
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
        return transparent_hugepage_flags &
                        ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
                         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
        return transparent_hugepage_flags &
                        (1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
        return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
        *orders &= ~BIT(prev);
        return highest_order(*orders);
}
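
/*
 * Usage sketch (illustrative only): callers typically walk an order bitmask
 * from the highest candidate order down to the lowest, e.g.
 *
 *      int order = highest_order(orders);
 *
 *      while (orders) {
 *              if (some_condition_holds(order))        // hypothetical predicate
 *                      break;
 *              order = next_order(&orders, order);
 *      }
 *
 * When the mask is exhausted, "orders" becomes 0 and the loop terminates.
 */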

/*
 * Perform the following checks:
 *   - For a file-backed vma, check that the linear page offset of the vma is
 *     order-aligned within the file.  The hugepage is guaranteed to be
 *     order-aligned within the file, but we must check that the order-aligned
 *     addresses in the VMA map to order-aligned offsets within the file, else
 *     the hugepage will not be mappable.
 *   - For all vmas, check that the hugepage-aligned range around addr fits
 *     entirely within the vma.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
                unsigned long addr, int order)
{
        unsigned long hpage_size = PAGE_SIZE << order;
        unsigned long haddr;

        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
                                hpage_size >> PAGE_SHIFT))
                        return false;
        }

        haddr = ALIGN_DOWN(addr, hpage_size);

        if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
                return false;
        return true;
}

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
                unsigned long addr, unsigned long orders)
{
        int order;

        /*
         * Iterate over orders, highest to lowest, removing orders that don't
         * meet alignment requirements from the set. Exit loop at first order
         * that meets requirements, since all lower orders must also meet
         * requirements.
         */
        order = highest_order(orders);
        while (orders) {
                if (thp_vma_suitable_order(vma, addr, order))
                        break;
                order = next_order(&orders, order);
        }

        return orders;
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
                                         vm_flags_t vm_flags,
                                         enum tva_type type,
                                         unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @type: TVA type
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc.). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
                                       vm_flags_t vm_flags,
                                       enum tva_type type,
                                       unsigned long orders)
{
        /*
         * Optimization to check if required orders are enabled early. Only
         * forced collapse ignores sysfs configs.
         */
        if (type != TVA_FORCED_COLLAPSE && vma_is_anonymous(vma)) {
                unsigned long mask = READ_ONCE(huge_anon_orders_always);

                if (vm_flags & VM_HUGEPAGE)
                        mask |= READ_ONCE(huge_anon_orders_madvise);
                if (hugepage_global_always() ||
                    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
                        mask |= READ_ONCE(huge_anon_orders_inherit);

                orders &= mask;
                if (!orders)
                        return 0;
        }

        return __thp_vma_allowable_orders(vma, vm_flags, type, orders);
}
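
/*
 * Usage sketch (illustrative only; not a definition from this header): an
 * anonymous fault handler might narrow the candidate orders in two steps and
 * then walk them highest to lowest:
 *
 *      unsigned long orders;
 *      int order;
 *
 *      orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_PAGEFAULT,
 *                                        THP_ORDERS_ALL_ANON);
 *      orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 *      for (order = highest_order(orders); orders;
 *           order = next_order(&orders, order)) {
 *              // try to allocate a folio of this order, stop on success
 *      }
 *
 * "vma" and "vmf" are assumed caller variables; the allocation step is elided.
 */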

struct thpsize {
        struct kobject kobj;
        struct list_head node;
        int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

/*
 * Check whether THPs are explicitly disabled for this VMA, for example,
 * through madvise or prctl.
 */
static inline bool vma_thp_disabled(struct vm_area_struct *vma,
                vm_flags_t vm_flags, bool forced_collapse)
{
        /* Are THPs disabled for this VMA? */
        if (vm_flags & VM_NOHUGEPAGE)
                return true;
        /* Are THPs disabled for all VMAs in the whole process? */
        if (mm_flags_test(MMF_DISABLE_THP_COMPLETELY, vma->vm_mm))
                return true;
        /*
         * Are THPs disabled only for VMAs where we didn't get an explicit
         * advice to use them?
         */
        if (vm_flags & VM_HUGEPAGE)
                return false;
        /*
         * Forcing a collapse (e.g. MADV_COLLAPSE) is clear advice to use
         * THPs.
         */
        if (forced_collapse)
                return false;
        return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm);
}

static inline bool thp_disabled_by_hw(void)
{
        /* If the hardware/firmware marked hugepage support disabled. */
        return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
}
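
/*
 * Usage sketch (illustrative only): a typical gate before attempting any THP
 * work on a VMA is
 *
 *      if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags, false))
 *              return 0;       // no THP for this VMA
 *
 * where "vma" is an assumed caller variable and the false argument means the
 * caller is not performing a forced collapse.
 */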

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags,
                vm_flags_t vm_flags);

enum split_type {
        SPLIT_TYPE_UNIFORM,
        SPLIT_TYPE_NON_UNIFORM,
};

int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order);
int folio_split_unmapped(struct folio *folio, unsigned int new_order);
unsigned int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
int folio_check_splittable(struct folio *folio, unsigned int new_order,
                enum split_type split_type);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
                struct list_head *list);

static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order)
{
        return __split_huge_page_to_list_to_order(page, list, new_order);
}
static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
{
        return split_huge_page_to_list_to_order(page, NULL, new_order);
}

/**
 * try_folio_split_to_order() - try to split a @folio at @page to @new_order
 * using a non-uniform split.
 * @folio: folio to be split
 * @page: split to @new_order at the given page
 * @new_order: the target split order
 *
 * Try to split a @folio at @page using a non-uniform split to @new_order; if
 * a non-uniform split is not supported, fall back to a uniform split.
 * After-split folios are put back on the LRU list. Use min_order_for_split()
 * to get the lower bound of @new_order.
 *
 * Return: 0 - split is successful, otherwise split failed.
 */
static inline int try_folio_split_to_order(struct folio *folio,
                struct page *page, unsigned int new_order)
{
        if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
                return split_huge_page_to_order(&folio->page, new_order);
        return folio_split(folio, new_order, page, NULL);
}
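
/*
 * Usage sketch (illustrative only): a caller truncating within a large folio
 * might do
 *
 *      unsigned int min_order = min_order_for_split(folio);
 *
 *      if (try_folio_split_to_order(folio, page, min_order))
 *              // splitting failed; handle the whole folio instead
 *
 * where "folio" and "page" are assumed caller variables and @page lies within
 * @folio.
 */
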
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio, bool partially_mapped);
#ifdef CONFIG_MEMCG
void reparent_deferred_split_queue(struct mem_cgroup *memcg);
#endif

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze);

/**
 * pmd_is_huge() - Is this PMD either a huge PMD entry or a software leaf entry?
 * @pmd: The PMD to check.
 *
 * A huge PMD entry is a non-empty entry which is present and marked huge or a
 * software leaf entry. This check may be performed without the appropriate
 * locks held, in which case the condition should be rechecked after they are
 * acquired.
 *
 * Returns: true if this PMD is huge, false otherwise.
 */
static inline bool pmd_is_huge(pmd_t pmd)
{
        if (pmd_present(pmd)) {
                return pmd_trans_huge(pmd);
        } else if (!pmd_none(pmd)) {
                /*
                 * Non-present PMDs must be valid huge non-present entries. We
                 * cannot assert that here due to header dependency issues.
                 */
                return true;
        }

        return false;
}

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (pmd_is_huge(*____pmd))                              \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                         false);                        \
        } while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
                    pud_t *pudp, unsigned long addr, pgprot_t newprot,
                    unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
                pud_t *pudp, unsigned long addr, pgprot_t newprot,
                unsigned long cp_flags) { return 0; }
#endif

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud))                           \
                        __split_huge_pud(__vma, __pud, __address);      \
        } while (0)

int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
                     int advice);
int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        if (pmd_is_huge(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);

        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        if (pud_trans_huge(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
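
/*
 * Usage sketch (illustrative only): the lock helpers return the page-table
 * lock only if the entry is (still) huge, so callers typically do
 *
 *      spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);
 *
 *      if (ptl) {
 *              // operate on the huge PMD
 *              spin_unlock(ptl);
 *      }
 *
 * with the mmap_lock already held, as noted above.
 */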

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 *
 * Return: true - @folio can be mapped, false - @folio cannot be mapped.
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
        return folio_order(folio) >= HPAGE_PMD_ORDER;
}

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
        VM_WARN_ON_ONCE(!folio);

        return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
        return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

static inline struct folio *get_persistent_huge_zero_folio(void)
{
        if (!IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
                return NULL;

        if (unlikely(!huge_zero_folio))
                return NULL;

        return huge_zero_folio;
}

static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmd, bool freeze);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
                           pmd_t *pmdp, struct folio *folio);
void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
                             struct vm_area_struct *vma, unsigned long haddr);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
        return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
                unsigned long addr, int order)
{
        return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
                unsigned long addr, unsigned long orders)
{
        return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
                                                     vm_flags_t vm_flags,
                                                     enum tva_type type,
                                                     unsigned long orders)
{
        return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff,
                unsigned long flags, vm_flags_t vm_flags)
{
        return 0;
}

static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
        return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order)
{
        VM_WARN_ON_ONCE_PAGE(1, page);
        return -EINVAL;
}
static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
{
        VM_WARN_ON_ONCE_PAGE(1, page);
        return -EINVAL;
}
static inline int split_huge_page(struct page *page)
{
        VM_WARN_ON_ONCE_PAGE(1, page);
        return -EINVAL;
}

static inline unsigned int min_order_for_split(struct folio *folio)
{
        VM_WARN_ON_ONCE_FOLIO(1, folio);
        return 0;
}

static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
        VM_WARN_ON_ONCE_FOLIO(1, folio);
        return -EINVAL;
}

static inline int try_folio_split_to_order(struct folio *folio,
                struct page *page, unsigned int new_order)
{
        VM_WARN_ON_ONCE_FOLIO(1, folio);
        return -EINVAL;
}

static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd,
                bool freeze) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp,
                struct folio *folio)
{
        return false;
}

#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                vm_flags_t *vm_flags, int advice)
{
        return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
                unsigned long start,
                unsigned long end, bool *lock_dropped)
{
        return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                unsigned long start,
                unsigned long end,
                struct vm_area_struct *next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
        return 0;
}

static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
{
        return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
        return false;
}

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
        return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
        return;
}

static inline bool thp_migration_supported(void)
{
        return false;
}

static inline int highest_order(unsigned long orders)
{
        return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
        return 0;
}

static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address)
{
}

static inline int change_huge_pud(struct mmu_gather *tlb,
                struct vm_area_struct *vma, pud_t *pudp,
                unsigned long addr, pgprot_t newprot,
                unsigned long cp_flags)
{
        return 0;
}

static inline struct folio *get_persistent_huge_zero_folio(void)
{
        return NULL;
}

static inline bool pmd_is_huge(pmd_t pmd)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
                struct list_head *list, int new_order)
{
        return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
        return split_folio_to_list_to_order(folio, NULL, new_order);
}

/**
 * largest_zero_folio - Get the largest zero folio available
 *
 * This function shall be used when mm_get_huge_zero_folio() cannot be used
 * because the caller has no appropriate mm lifetime to tie the huge zero
 * folio to.
 *
 * Deduce the size of the returned folio with folio_size() instead of assuming
 * it.
 *
 * Return: pointer to the PMD-sized zero folio if CONFIG_PERSISTENT_HUGE_ZERO_FOLIO
 * is enabled, otherwise the single-page zero folio.
 */
static inline struct folio *largest_zero_folio(void)
{
        struct folio *folio = get_persistent_huge_zero_folio();

        if (folio)
                return folio;

        return page_folio(ZERO_PAGE(0));
}
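
/*
 * Usage sketch (illustrative only; "len" is an assumed caller variable): when
 * zero-filling without an mm to pin the huge zero folio to, a caller can do
 *
 *      struct folio *zero = largest_zero_folio();
 *      size_t chunk = min(len, folio_size(zero));
 *
 * and work in chunks of at most folio_size(zero) bytes, without assuming
 * whether the returned folio is PMD-sized or a single page.
 */
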
#endif /* _LINUX_HUGE_MM_H */