// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/ksm.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/leafops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/minmax.h>
#include <linux/overflow.h>
#include <linux/buildid.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define SENTINEL_VMA_END	-1
#define SENTINEL_VMA_GATE	-2
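/*
 * These sentinels are stored in *ppos by proc_get_vma()/m_next() below:
 * once the last real VMA has been reported, *ppos is set to
 * SENTINEL_VMA_GATE while the gate VMA is shown, and then to
 * SENTINEL_VMA_END so that m_start() knows the walk is complete.
 */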

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
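/*
 * Illustrative: the shift converts a page count to kB. With 4 KiB pages
 * (PAGE_SHIFT == 12), a counter of 3 pages prints as 3 << (12 - 10) == 12 kB.
 */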
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter_sum(mm, MM_ANONPAGES);
	file = get_mm_counter_sum(mm, MM_FILEPAGES);
	shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss. Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher. Barriers? Not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter_sum(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m, " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m, " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m, " kB\nVmPTE:\t",
				  mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
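/*
 * Illustrative /proc/<pid>/status excerpt produced above (values are
 * per-task and will differ):
 *
 *	VmPeak:	    8288 kB
 *	VmSize:	    8288 kB
 *	VmRSS:	    1024 kB
 *	VmSwap:	       0 kB
 */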
#undef SEQ_PUT_DEC

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
			get_mm_counter_sum(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
	return mm->total_vm;
}
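/*
 * Note: these values feed /proc/<pid>/statm, whose seven fields are
 * size, resident, shared, text, lib, data and dt (lib and dt read as 0
 * on current kernels), all in pages.
 */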

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

#ifdef CONFIG_PER_VMA_LOCK

static void reset_lock_ctx(struct proc_maps_locking_ctx *lock_ctx)
{
	lock_ctx->locked_vma = NULL;
	lock_ctx->mmap_locked = false;
}

static void unlock_ctx_vma(struct proc_maps_locking_ctx *lock_ctx)
{
	if (lock_ctx->locked_vma) {
		vma_end_read(lock_ctx->locked_vma);
		lock_ctx->locked_vma = NULL;
	}
}

static const struct seq_operations proc_pid_maps_op;

static inline bool lock_vma_range(struct seq_file *m,
				  struct proc_maps_locking_ctx *lock_ctx)
{
	/*
	 * smaps and numa_maps perform a page table walk and therefore
	 * require mmap_lock, but maps can be read by locking just the vma
	 * and walking the vma tree under rcu read protection.
	 */
	if (m->op != &proc_pid_maps_op) {
		if (mmap_read_lock_killable(lock_ctx->mm))
			return false;

		lock_ctx->mmap_locked = true;
	} else {
		rcu_read_lock();
		reset_lock_ctx(lock_ctx);
	}

	return true;
}

static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{
	if (lock_ctx->mmap_locked) {
		mmap_read_unlock(lock_ctx->mm);
	} else {
		unlock_ctx_vma(lock_ctx);
		rcu_read_unlock();
	}
}

static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
					   loff_t last_pos)
{
	struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;
	struct vm_area_struct *vma;

	if (lock_ctx->mmap_locked)
		return vma_next(&priv->iter);

	unlock_ctx_vma(lock_ctx);
	vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
	if (!IS_ERR_OR_NULL(vma))
		lock_ctx->locked_vma = vma;

	return vma;
}

static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
					 loff_t pos)
{
	struct proc_maps_locking_ctx *lock_ctx = &priv->lock_ctx;

	if (lock_ctx->mmap_locked)
		return false;

	rcu_read_unlock();
	mmap_read_lock(lock_ctx->mm);
	/* Reinitialize the iterator after taking mmap_lock */
	vma_iter_set(&priv->iter, pos);
	lock_ctx->mmap_locked = true;

	return true;
}
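/*
 * Note: lock_next_vma() can also return ERR_PTR(-EAGAIN) when the VMA
 * cannot be read-locked (e.g. on vma->vm_refcnt overflow, see
 * query_vma_find_by_addr() below); proc_get_vma() then uses
 * fallback_to_mmap_lock() above to retry the walk under mmap_lock.
 */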

#else /* CONFIG_PER_VMA_LOCK */

static inline bool lock_vma_range(struct seq_file *m,
				  struct proc_maps_locking_ctx *lock_ctx)
{
	return mmap_read_lock_killable(lock_ctx->mm) == 0;
}

static inline void unlock_vma_range(struct proc_maps_locking_ctx *lock_ctx)
{
	mmap_read_unlock(lock_ctx->mm);
}

static struct vm_area_struct *get_next_vma(struct proc_maps_private *priv,
					   loff_t last_pos)
{
	return vma_next(&priv->iter);
}

static inline bool fallback_to_mmap_lock(struct proc_maps_private *priv,
					 loff_t pos)
{
	return false;
}

#endif /* CONFIG_PER_VMA_LOCK */

static struct vm_area_struct *proc_get_vma(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma;

retry:
	vma = get_next_vma(priv, *ppos);
	/* EINTR or EAGAIN is possible */
	if (IS_ERR(vma)) {
		if (PTR_ERR(vma) == -EAGAIN && fallback_to_mmap_lock(priv, *ppos))
			goto retry;

		return vma;
	}

	/* Store previous position to be able to restart if needed */
	priv->last_pos = *ppos;
	if (vma) {
		/*
		 * Track the end of the reported vma to ensure position changes
		 * even if previous vma was merged with the next vma and we
		 * found the extended vma with the same vm_start.
		 */
		*ppos = vma->vm_end;
	} else {
		*ppos = SENTINEL_VMA_GATE;
		vma = get_gate_vma(priv->lock_ctx.mm);
	}

	return vma;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct proc_maps_locking_ctx *lock_ctx;
	loff_t last_addr = *ppos;
	struct mm_struct *mm;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == SENTINEL_VMA_END)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	lock_ctx = &priv->lock_ctx;
	mm = lock_ctx->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (!lock_vma_range(m, lock_ctx)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	/*
	 * Reset current position if last_addr was set before
	 * and it's not a sentinel.
	 */
	if (last_addr > 0)
		*ppos = last_addr = priv->last_pos;
	vma_iter_init(&priv->iter, mm, (unsigned long)last_addr);
	hold_task_mempolicy(priv);
	if (last_addr == SENTINEL_VMA_GATE)
		return get_gate_vma(mm);

	return proc_get_vma(m, ppos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	if (*ppos == SENTINEL_VMA_GATE) {
		*ppos = SENTINEL_VMA_END;
		return NULL;
	}
	return proc_get_vma(m, ppos);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->lock_ctx.mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	unlock_vma_range(&priv->lock_ctx);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static int proc_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->lock_ctx.mm)) {
		int err = PTR_ERR(priv->lock_ctx.mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->lock_ctx.mm)
		mmdrop(priv->lock_ctx.mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static void get_vma_name(struct vm_area_struct *vma,
			 const struct path **path,
			 const char **name,
			 const char **name_fmt)
{
	struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;

	*name = NULL;
	*path = NULL;
	*name_fmt = NULL;

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (vma->vm_file) {
		/*
		 * If user named this anon shared memory via
		 * prctl(PR_SET_VMA, ...), use the provided name.
		 */
		if (anon_name) {
			*name_fmt = "[anon_shmem:%s]";
			*name = anon_name->name;
		} else {
			*path = file_user_path(vma->vm_file);
		}
		return;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		*name = vma->vm_ops->name(vma);
		if (*name)
			return;
	}

	*name = arch_vma_name(vma);
	if (*name)
		return;

	if (!vma->vm_mm) {
		*name = "[vdso]";
		return;
	}

	if (vma_is_initial_heap(vma)) {
		*name = "[heap]";
		return;
	}

	if (vma_is_initial_stack(vma)) {
		*name = "[stack]";
		return;
	}

	if (anon_name) {
		*name_fmt = "[anon:%s]";
		*name = anon_name->name;
		return;
	}
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	const struct path *path;
	const char *name_fmt, *name;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;

	if (vma->vm_file) {
		const struct inode *inode = file_user_inode(vma->vm_file);

		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	get_vma_name(vma, &path, &name, &name_fmt);
	if (path) {
		seq_pad(m, ' ');
		seq_path(m, path, "\n");
	} else if (name_fmt) {
		seq_pad(m, ' ');
		seq_printf(m, name_fmt, name);
	} else if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
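/*
 * Illustrative /proc/<pid>/maps line as emitted above (address range,
 * perms, offset, dev, inode, name):
 *
 *	7f3c5aa00000-7f3c5ac00000 r-xp 00000000 08:02 173521   /usr/lib/libc.so.6
 */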

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

#define PROCMAP_QUERY_VMA_FLAGS (				\
		PROCMAP_QUERY_VMA_READABLE |			\
		PROCMAP_QUERY_VMA_WRITABLE |			\
		PROCMAP_QUERY_VMA_EXECUTABLE |			\
		PROCMAP_QUERY_VMA_SHARED			\
)

#define PROCMAP_QUERY_VALID_FLAGS_MASK (			\
		PROCMAP_QUERY_COVERING_OR_NEXT_VMA |		\
		PROCMAP_QUERY_FILE_BACKED_VMA |			\
		PROCMAP_QUERY_VMA_FLAGS				\
)
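/*
 * Illustrative userspace use of the PROCMAP_QUERY ioctl handled below
 * (a sketch, not part of this file): find the VMA covering addr, or the
 * next readable one after it, via an fd open on /proc/<pid>/maps:
 *
 *	struct procmap_query q = {
 *		.size = sizeof(q),
 *		.query_addr = addr,
 *		.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA |
 *			       PROCMAP_QUERY_VMA_READABLE,
 *	};
 *	err = ioctl(maps_fd, PROCMAP_QUERY, &q);
 *	// on success, q.vma_start/q.vma_end describe the match
 */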

#ifdef CONFIG_PER_VMA_LOCK

static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
{
	reset_lock_ctx(lock_ctx);

	return 0;
}

static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
{
	if (lock_ctx->mmap_locked) {
		mmap_read_unlock(lock_ctx->mm);
		lock_ctx->mmap_locked = false;
	} else {
		unlock_ctx_vma(lock_ctx);
	}
}

static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
						     unsigned long addr)
{
	struct mm_struct *mm = lock_ctx->mm;
	struct vm_area_struct *vma;
	struct vma_iterator vmi;

	if (lock_ctx->mmap_locked)
		return find_vma(mm, addr);

	/* Unlock previously locked VMA and find the next one under RCU */
	unlock_ctx_vma(lock_ctx);
	rcu_read_lock();
	vma_iter_init(&vmi, mm, addr);
	vma = lock_next_vma(mm, &vmi, addr);
	rcu_read_unlock();

	if (!vma)
		return NULL;

	if (!IS_ERR(vma)) {
		lock_ctx->locked_vma = vma;
		return vma;
	}

	if (PTR_ERR(vma) == -EAGAIN) {
		/* Fallback to mmap_lock on vma->vm_refcnt overflow */
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		lock_ctx->mmap_locked = true;
	}

	return vma;
}

#else /* CONFIG_PER_VMA_LOCK */

static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
{
	return mmap_read_lock_killable(lock_ctx->mm);
}

static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
{
	mmap_read_unlock(lock_ctx->mm);
}

static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
						     unsigned long addr)
{
	return find_vma(lock_ctx->mm, addr);
}

#endif /* CONFIG_PER_VMA_LOCK */

static struct vm_area_struct *query_matching_vma(struct proc_maps_locking_ctx *lock_ctx,
						 unsigned long addr, u32 flags)
{
	struct vm_area_struct *vma;

next_vma:
	vma = query_vma_find_by_addr(lock_ctx, addr);
	if (IS_ERR(vma))
		return vma;

	if (!vma)
		goto no_vma;

	/* user requested only file-backed VMA, keep iterating */
	if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
		goto skip_vma;

	/* VMA permissions should satisfy query flags */
	if (flags & PROCMAP_QUERY_VMA_FLAGS) {
		u32 perm = 0;

		if (flags & PROCMAP_QUERY_VMA_READABLE)
			perm |= VM_READ;
		if (flags & PROCMAP_QUERY_VMA_WRITABLE)
			perm |= VM_WRITE;
		if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
			perm |= VM_EXEC;
		if (flags & PROCMAP_QUERY_VMA_SHARED)
			perm |= VM_MAYSHARE;

		if ((vma->vm_flags & perm) != perm)
			goto skip_vma;
	}

	/* found covering VMA or user is OK with the matching next VMA */
	if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
		return vma;

skip_vma:
	/*
	 * If the user needs closest matching VMA, keep iterating.
	 */
	addr = vma->vm_end;
	if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
		goto next_vma;

no_vma:
	return ERR_PTR(-ENOENT);
}

static int do_procmap_query(struct mm_struct *mm, void __user *uarg)
{
	struct proc_maps_locking_ctx lock_ctx = { .mm = mm };
	struct procmap_query karg;
	struct vm_area_struct *vma;
	struct file *vm_file = NULL;
	const char *name = NULL;
	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
	__u64 usize;
	int err;

	if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
		return -EFAULT;
	/* argument struct can never be that large, reject abuse */
	if (usize > PAGE_SIZE)
		return -E2BIG;
	/* argument struct should have at least query_flags and query_addr fields */
	if (usize < offsetofend(struct procmap_query, query_addr))
		return -EINVAL;
	err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
	if (err)
		return err;

	/* reject unknown flags */
	if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
		return -EINVAL;
	/* either both buffer address and size are set, or both should be zero */
	if (!!karg.vma_name_size != !!karg.vma_name_addr)
		return -EINVAL;
	if (!!karg.build_id_size != !!karg.build_id_addr)
		return -EINVAL;

	if (!mm || !mmget_not_zero(mm))
		return -ESRCH;

	err = query_vma_setup(&lock_ctx);
	if (err) {
		mmput(mm);
		return err;
	}

	vma = query_matching_vma(&lock_ctx, karg.query_addr, karg.query_flags);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		vma = NULL;
		goto out;
	}

	karg.vma_start = vma->vm_start;
	karg.vma_end = vma->vm_end;

	karg.vma_flags = 0;
	if (vma->vm_flags & VM_READ)
		karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
	if (vma->vm_flags & VM_WRITE)
		karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
	if (vma->vm_flags & VM_EXEC)
		karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
	if (vma->vm_flags & VM_MAYSHARE)
		karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;

	karg.vma_page_size = vma_kernel_pagesize(vma);

	if (vma->vm_file) {
		const struct inode *inode = file_user_inode(vma->vm_file);

		karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
		karg.dev_major = MAJOR(inode->i_sb->s_dev);
		karg.dev_minor = MINOR(inode->i_sb->s_dev);
		karg.inode = inode->i_ino;
	} else {
		karg.vma_offset = 0;
		karg.dev_major = 0;
		karg.dev_minor = 0;
		karg.inode = 0;
	}

	if (karg.vma_name_size) {
		size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
		const struct path *path;
		const char *name_fmt;
		size_t name_sz = 0;

		get_vma_name(vma, &path, &name, &name_fmt);

		if (path || name_fmt || name) {
			name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
			if (!name_buf) {
				err = -ENOMEM;
				goto out;
			}
		}
		if (path) {
			name = d_path(path, name_buf, name_buf_sz);
			if (IS_ERR(name)) {
				err = PTR_ERR(name);
				goto out;
			}
			name_sz = name_buf + name_buf_sz - name;
		} else if (name || name_fmt) {
			name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
			name = name_buf;
		}
		if (name_sz > name_buf_sz) {
			err = -ENAMETOOLONG;
			goto out;
		}
		karg.vma_name_size = name_sz;
	}

	if (karg.build_id_size && vma->vm_file)
		vm_file = get_file(vma->vm_file);

	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
	query_vma_teardown(&lock_ctx);
	mmput(mm);

	if (karg.build_id_size) {
		__u32 build_id_sz;

		if (vm_file)
			err = build_id_parse_file(vm_file, build_id_buf, &build_id_sz);
		else
			err = -ENOENT;
		if (err) {
			karg.build_id_size = 0;
		} else {
			if (karg.build_id_size < build_id_sz) {
				err = -ENAMETOOLONG;
				/* lock and mm reference were already dropped above */
				goto out_free;
			}
			karg.build_id_size = build_id_sz;
		}
	}

	if (vm_file)
		fput(vm_file);

	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
					       name, karg.vma_name_size)) {
		kfree(name_buf);
		return -EFAULT;
	}
	kfree(name_buf);

	if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
					       build_id_buf, karg.build_id_size))
		return -EFAULT;

	if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
		return -EFAULT;

	return 0;

out:
	query_vma_teardown(&lock_ctx);
	mmput(mm);
out_free:
	if (vm_file)
		fput(vm_file);
	kfree(name_buf);
	return err;
}

static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	switch (cmd) {
	case PROCMAP_QUERY:
		/* priv->lock_ctx.mm is set during file open operation */
		return do_procmap_query(priv->lock_ctx.mm, (void __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
	.unlocked_ioctl = procfs_procmap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it. So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
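/*
 * Worked example (4 KiB pages): a page mapped by three processes
 * contributes (4096 << PSS_SHIFT) / 3 = 5592405 to pss, and
 * 5592405 >> PSS_SHIFT = 1365 bytes, i.e. roughly 4096 / 3.
 */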

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	unsigned long ksm;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_dirty;
	u64 pss_locked;
	u64 swap_pss;
};

static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct folio *folio, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (folio_test_anon(folio))
		mss->pss_anon += pss;
	else if (folio_test_swapbacked(folio))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || folio_test_dirty(folio)) {
		mss->pss_dirty += pss;
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked,
		bool present)
{
	struct folio *folio = page_folio(page);
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;
	bool exclusive;
	int mapcount;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (folio_test_anon(folio)) {
		mss->anonymous += size;
		if (!folio_test_swapbacked(folio) && !dirty &&
		    !folio_test_dirty(folio))
			mss->lazyfree += size;
	}

	if (folio_test_ksm(folio))
		mss->ksm += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || folio_test_young(folio) || folio_test_referenced(folio))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * refcount == 1 for present entries guarantees that the folio is mapped
	 * exactly once. For large folios this implies that exactly one
	 * PTE/PMD/... maps (a part of) this folio.
	 *
	 * Treat all non-present entries (where relying on the mapcount and
	 * refcount doesn't make sense) as "maybe shared, but not sure how
	 * often". We treat device private entries as being fake-present.
	 *
	 * Note that it would not be safe to read the mapcount especially for
	 * pages referenced by migration entries, even with the PTL held.
	 */
	if (folio_ref_count(folio) == 1 || !present) {
		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
				      dirty, locked, present);
		return;
	}

	if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
		mapcount = folio_average_page_mapcount(folio);
		exclusive = !folio_maybe_mapped_shared(folio);
	}

	/*
	 * We obtain a snapshot of the mapcount. Without holding the folio lock
	 * this snapshot can be slightly wrong as we cannot always read the
	 * mapcount atomically.
	 */
	for (i = 0; i < nr; i++, page++) {
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;

		if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
			mapcount = folio_precise_page_mapcount(folio, page);
			exclusive = mapcount < 2;
		}

		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
				      dirty, locked, exclusive);
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;

	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
					      linear_page_index(vma, addr),
					      linear_page_index(vma, end));

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */

static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
{
#ifdef CONFIG_SHMEM
	if (walk->ops->pte_hole) {
		/* depth is not used */
		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
	}
#endif
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool present = false, young = false, dirty = false;
	pte_t ptent = ptep_get(pte);

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
		young = pte_young(ptent);
		dirty = pte_dirty(ptent);
		present = true;
	} else if (pte_none(ptent)) {
		smaps_pte_hole_lookup(addr, walk);
	} else {
		const softleaf_t entry = softleaf_from_pte(ptent);

		if (softleaf_is_swap(entry)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(entry);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (softleaf_has_pfn(entry)) {
			if (softleaf_is_device_private(entry))
				present = true;
			page = softleaf_to_page(entry);
		}
	}

	if (!page)
		return;

	smaps_account(mss, page, false, young, dirty, locked, present);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool present = false;
	struct folio *folio;

	if (pmd_none(*pmd))
		return;
	if (pmd_present(*pmd)) {
		page = vm_normal_page_pmd(vma, addr, *pmd);
		present = true;
	} else if (unlikely(thp_migration_supported())) {
		const softleaf_t entry = softleaf_from_pmd(*pmd);

		if (softleaf_has_pfn(entry))
			page = softleaf_to_page(entry);
	}
	if (IS_ERR_OR_NULL(page))
		return;
	folio = page_folio(page);
	if (folio_test_anon(folio))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (folio_test_swapbacked(folio))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (folio_is_zone_device(folio))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;

	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
		      locked, present);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 *
	 * The length of the second dimension of mnemonics[] needs to be 3
	 * instead of the previous 2 (i.e. [BITS_PER_LONG][2] became
	 * [BITS_PER_LONG][3]) to avoid a spurious
	 * -Werror=unterminated-string-initialization warning with GCC 15.
	 */
	static const char mnemonics[BITS_PER_LONG][3] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_MAYBE_GUARD)]	= "gu",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_LOCKONFAULT)]	= "lf",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_SYNC)]	= "sf",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_ARM64_BTI
		[ilog2(VM_ARM64_BTI)]	= "bt",
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)] = "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_ARM64_MTE
		[ilog2(VM_MTE)]		= "mt",
		[ilog2(VM_MTE_ALLOWED)]	= "",
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
#if CONFIG_ARCH_PKEY_BITS > 3
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
#if CONFIG_ARCH_PKEY_BITS > 4
		[ilog2(VM_PKEY_BIT4)]	= "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		[ilog2(VM_UFFD_MINOR)]	= "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
#ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
		[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
		[ilog2(VM_DROPPABLE)]	= "dp",
#endif
#ifdef CONFIG_64BIT
		[ilog2(VM_SEALED)]	= "sl",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i))
			seq_printf(m, "%s ", mnemonics[i]);
	}
	seq_putc(m, '\n');
}
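/*
 * Illustrative VmFlags line for a typical private, readable/executable
 * file mapping (the exact set varies by kernel config and mapping):
 *
 *	VmFlags: rd ex mr mw me sr
 */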

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct folio *folio = NULL;
	bool present = false;
	spinlock_t *ptl;
	pte_t ptent;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	ptent = huge_ptep_get(walk->mm, addr, pte);
	if (pte_present(ptent)) {
		folio = page_folio(pte_page(ptent));
		present = true;
	} else {
		const softleaf_t entry = softleaf_from_pte(ptent);

		if (softleaf_has_pfn(entry))
			folio = softleaf_to_folio(entry);
	}

	if (folio) {
		/* We treat non-present entries as "maybe shared". */
		if (!present || folio_maybe_mapped_shared(folio) ||
		    hugetlb_pmd_shared(pte))
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	spin_unlock(ptl);
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.walk_lock		= PGWALK_RDLOCK,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
	.walk_lock		= PGWALK_RDLOCK,
};

/*
 * Gather mem stats from @vma with the indicated beginning
 * address @start, and keep them in @mss.
 *
 * Use vm_start of @vma as the beginning address if @start is 0.
 */
static void smap_gather_stats(struct vm_area_struct *vma,
		struct mem_size_stats *mss, unsigned long start)
{
	const struct mm_walk_ops *ops = &smaps_walk_ops;

	/* Invalid start */
	if (start >= vma->vm_end)
		return;

	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE))) {
			mss->swap += shmem_swapped;
		} else {
			ops = &smaps_shmem_walk_ops;
		}
	}

	/* mmap_lock is held in m_start */
	if (!start)
		walk_page_vma(vma, ops, mss);
	else
		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
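/*
 * Unlike the page-based SEQ_PUT_DEC variant used in task_mem() above,
 * the mem_size_stats fields accumulate byte counts, so this one only
 * shifts right by 10 to print kB.
 */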

/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	bool rollup_mode)
{
	SEQ_PUT_DEC("Rss:            ", mss->resident);
	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
	if (rollup_mode) {
		/*
		 * These are meaningful only for smaps_rollup, otherwise two of
		 * them are zero, and the other one is the same as Pss.
		 */
		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
			mss->pss_anon >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_File:       ",
			mss->pss_file >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
			mss->pss_shmem >> PSS_SHIFT);
	}
	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
				  mss->private_hugetlb >> 10, 7);
	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
	SEQ_PUT_DEC(" kB\nSwapPss:        ",
					mss->swap_pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nLocked:         ",
					mss->pss_locked >> PSS_SHIFT);
	seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss = {};

	smap_gather_stats(vma, &mss, 0);

	show_map_vma(m, vma);

	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
	seq_puts(m, " kB\n");

	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible: %8u\n",
		   !!thp_vma_allowable_orders(vma, vma->vm_flags, TVA_SMAPS,
					      THP_ORDERS_ALL));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);

	return 0;
}

static int show_smaps_rollup(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mem_size_stats mss = {};
	struct mm_struct *mm = priv->lock_ctx.mm;
	struct vm_area_struct *vma;
	unsigned long vma_start = 0, last_vma_end = 0;
	int ret = 0;
	VMA_ITERATOR(vmi, mm, 0);

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return -ESRCH;

	if (!mm || !mmget_not_zero(mm)) {
		ret = -ESRCH;
		goto out_put_task;
	}

	ret = mmap_read_lock_killable(mm);
	if (ret)
		goto out_put_mm;

	hold_task_mempolicy(priv);
	vma = vma_next(&vmi);

	if (unlikely(!vma))
		goto empty_set;

	vma_start = vma->vm_start;
	do {
		smap_gather_stats(vma, &mss, 0);
		last_vma_end = vma->vm_end;

		/*
		 * Release mmap_lock temporarily if someone wants to
		 * access it for write request.
		 */
		if (mmap_lock_is_contended(mm)) {
			vma_iter_invalidate(&vmi);
			mmap_read_unlock(mm);
			ret = mmap_read_lock_killable(mm);
			if (ret) {
				release_task_mempolicy(priv);
				goto out_put_mm;
			}

			/*
			 * After dropping the lock, there are four cases to
			 * consider. See the following example for explanation.
			 *
			 *   +------+------+-----------+
			 *   | VMA1 | VMA2 |    VMA3   |
			 *   +------+------+-----------+
			 *   |      |      |           |
			 *  4k     8k     16k         400k
			 *
			 * Suppose we drop the lock after reading VMA2 due to
			 * contention, then we get:
			 *
			 *	last_vma_end = 16k
			 *
			 * 1) VMA2 is freed, but VMA3 exists:
			 *
			 *    vma_next(vmi) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 2) VMA2 still exists:
			 *
			 *    vma_next(vmi) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 3) No more VMAs can be found:
			 *
			 *    vma_next(vmi) will return NULL.
			 *    No more things to do, just break.
			 *
			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
			 *
			 *    vma_next(vmi) will return VMA' whose range
			 *    contains last_vma_end.
			 *    Iterate VMA' from last_vma_end.
			 */
			vma = vma_next(&vmi);
			/* Case 3 above */
			if (!vma)
				break;

			/* Case 1 and 2 above */
			if (vma->vm_start >= last_vma_end) {
				smap_gather_stats(vma, &mss, 0);
				last_vma_end = vma->vm_end;
				continue;
			}

			/* Case 4 above */
			if (vma->vm_end > last_vma_end) {
				smap_gather_stats(vma, &mss, last_vma_end);
				last_vma_end = vma->vm_end;
			}
		}
	} for_each_vma(vmi, vma);

empty_set:
	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
	seq_pad(m, ' ');
	seq_puts(m, "[rollup]\n");

	__show_smap(m, &mss, true);

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(priv->task);
	priv->task = NULL;

	return ret;
}
#undef SEQ_PUT_DEC

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
	int ret;
	struct proc_maps_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
	if (!priv)
		return -ENOMEM;

	ret = single_open(file, show_smaps_rollup, priv);
	if (ret)
		goto out_free;

	priv->inode = inode;
	priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
		ret = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;

		single_release(inode, file);
		goto out_free;
	}

	return 0;

out_free:
	kfree(priv);
	return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->lock_ctx.mm)
		mmdrop(priv->lock_ctx.mm);

	kfree(priv);
	return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= smaps_rollup_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};
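/*
 * These correspond to the values written to /proc/<pid>/clear_refs:
 * "1" clears referenced bits on all pages, "2" on anonymous pages only,
 * "3" on file-backed pages only, "4" clears soft-dirty bits, and "5"
 * resets the peak RSS (hiwater_rss) of the process.
 */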
| 1594 | |
| 1595 | struct clear_refs_private { |
| 1596 | enum clear_refs_types type; |
| 1597 | }; |
| 1598 | |
| 1599 | static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte) |
| 1600 | { |
| 1601 | struct folio *folio; |
| 1602 | |
| 1603 | if (!pte_write(pte)) |
| 1604 | return false; |
| 1605 | if (!is_cow_mapping(flags: vma->vm_flags)) |
| 1606 | return false; |
| 1607 | if (likely(!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))) |
| 1608 | return false; |
| 1609 | folio = vm_normal_folio(vma, addr, pte); |
| 1610 | if (!folio) |
| 1611 | return false; |
| 1612 | return folio_maybe_dma_pinned(folio); |
| 1613 | } |
| 1614 | |
| 1615 | static inline void clear_soft_dirty(struct vm_area_struct *vma, |
| 1616 | unsigned long addr, pte_t *pte) |
| 1617 | { |
| 1618 | if (!pgtable_supports_soft_dirty()) |
| 1619 | return; |
| 1620 | /* |
| 1621 | * The soft-dirty tracker uses #PF-s to catch writes |
| 1622 | * to pages, so write-protect the pte as well. See the |
| 1623 | * Documentation/admin-guide/mm/soft-dirty.rst for full description |
| 1624 | * of how soft-dirty works. |
| 1625 | */ |
	pte_t ptent = ptep_get(pte);

	if (pte_none(ptent))
		return;

	if (pte_present(ptent)) {
		pte_t old_pte;

		if (pte_is_pinned(vma, addr, ptent))
			return;
		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_wrprotect(old_pte);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
| 1644 | } |
| 1645 | |
| 1646 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) |
| 1647 | static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, |
| 1648 | unsigned long addr, pmd_t *pmdp) |
| 1649 | { |
| 1650 | pmd_t old, pmd = *pmdp; |
| 1651 | |
| 1652 | if (!pgtable_supports_soft_dirty()) |
| 1653 | return; |
| 1654 | |
| 1655 | if (pmd_present(pmd)) { |
| 1656 | /* See comment in change_huge_pmd() */ |
		old = pmdp_invalidate(vma, addr, pmdp);
		if (pmd_dirty(old))
			pmd = pmd_mkdirty(pmd);
		if (pmd_young(old))
			pmd = pmd_mkyoung(pmd);

		pmd = pmd_wrprotect(pmd);
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (pmd_is_migration_entry(pmd)) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
| 1670 | } |
| 1671 | } |
| 1672 | #else |
| 1673 | static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, |
| 1674 | unsigned long addr, pmd_t *pmdp) |
| 1675 | { |
| 1676 | } |
| 1677 | #endif |
| 1678 | |
| 1679 | static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, |
| 1680 | unsigned long end, struct mm_walk *walk) |
| 1681 | { |
| 1682 | struct clear_refs_private *cp = walk->private; |
| 1683 | struct vm_area_struct *vma = walk->vma; |
| 1684 | pte_t *pte, ptent; |
| 1685 | spinlock_t *ptl; |
| 1686 | struct folio *folio; |
| 1687 | |
| 1688 | ptl = pmd_trans_huge_lock(pmd, vma); |
| 1689 | if (ptl) { |
| 1690 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { |
			clear_soft_dirty_pmd(vma, addr, pmd);
| 1692 | goto out; |
| 1693 | } |
| 1694 | |
		if (!pmd_present(*pmd))
| 1696 | goto out; |
| 1697 | |
| 1698 | folio = pmd_folio(*pmd); |
| 1699 | |
| 1700 | /* Clear accessed and referenced bits. */ |
		pmdp_test_and_clear_young(vma, addr, pmd);
| 1702 | folio_test_clear_young(folio); |
| 1703 | folio_clear_referenced(folio); |
| 1704 | out: |
		spin_unlock(ptl);
| 1706 | return 0; |
| 1707 | } |
| 1708 | |
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
| 1710 | if (!pte) { |
| 1711 | walk->action = ACTION_AGAIN; |
| 1712 | return 0; |
| 1713 | } |
| 1714 | for (; addr != end; pte++, addr += PAGE_SIZE) { |
		ptent = ptep_get(pte);
| 1716 | |
| 1717 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { |
| 1718 | clear_soft_dirty(vma, addr, pte); |
| 1719 | continue; |
| 1720 | } |
| 1721 | |
		if (!pte_present(ptent))
| 1723 | continue; |
| 1724 | |
		folio = vm_normal_folio(vma, addr, ptent);
| 1726 | if (!folio) |
| 1727 | continue; |
| 1728 | |
| 1729 | /* Clear accessed and referenced bits. */ |
		ptep_test_and_clear_young(vma, addr, pte);
| 1731 | folio_test_clear_young(folio); |
| 1732 | folio_clear_referenced(folio); |
| 1733 | } |
| 1734 | pte_unmap_unlock(pte - 1, ptl); |
| 1735 | cond_resched(); |
| 1736 | return 0; |
| 1737 | } |
| 1738 | |
| 1739 | static int clear_refs_test_walk(unsigned long start, unsigned long end, |
| 1740 | struct mm_walk *walk) |
| 1741 | { |
| 1742 | struct clear_refs_private *cp = walk->private; |
| 1743 | struct vm_area_struct *vma = walk->vma; |
| 1744 | |
| 1745 | if (vma->vm_flags & VM_PFNMAP) |
| 1746 | return 1; |
| 1747 | |
| 1748 | /* |
| 1749 | * Writing 1 to /proc/pid/clear_refs affects all pages. |
| 1750 | * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. |
| 1751 | * Writing 3 to /proc/pid/clear_refs only affects file mapped pages. |
| 1752 | * Writing 4 to /proc/pid/clear_refs affects all pages. |
| 1753 | */ |
| 1754 | if (cp->type == CLEAR_REFS_ANON && vma->vm_file) |
| 1755 | return 1; |
| 1756 | if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) |
| 1757 | return 1; |
| 1758 | return 0; |
| 1759 | } |
| 1760 | |
| 1761 | static const struct mm_walk_ops clear_refs_walk_ops = { |
| 1762 | .pmd_entry = clear_refs_pte_range, |
| 1763 | .test_walk = clear_refs_test_walk, |
| 1764 | .walk_lock = PGWALK_WRLOCK, |
| 1765 | }; |
| 1766 | |
| 1767 | static ssize_t clear_refs_write(struct file *file, const char __user *buf, |
| 1768 | size_t count, loff_t *ppos) |
| 1769 | { |
| 1770 | struct task_struct *task; |
| 1771 | char buffer[PROC_NUMBUF] = {}; |
| 1772 | struct mm_struct *mm; |
| 1773 | struct vm_area_struct *vma; |
| 1774 | enum clear_refs_types type; |
| 1775 | int itype; |
| 1776 | int rv; |
| 1777 | |
| 1778 | if (count > sizeof(buffer) - 1) |
| 1779 | count = sizeof(buffer) - 1; |
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
| 1783 | if (rv < 0) |
| 1784 | return rv; |
| 1785 | type = (enum clear_refs_types)itype; |
| 1786 | if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) |
| 1787 | return -EINVAL; |
| 1788 | |
	task = get_proc_task(file_inode(file));
| 1790 | if (!task) |
| 1791 | return -ESRCH; |
| 1792 | mm = get_task_mm(task); |
| 1793 | if (mm) { |
| 1794 | VMA_ITERATOR(vmi, mm, 0); |
| 1795 | struct mmu_notifier_range range; |
| 1796 | struct clear_refs_private cp = { |
| 1797 | .type = type, |
| 1798 | }; |
| 1799 | |
| 1800 | if (mmap_write_lock_killable(mm)) { |
| 1801 | count = -EINTR; |
| 1802 | goto out_mm; |
| 1803 | } |
| 1804 | if (type == CLEAR_REFS_MM_HIWATER_RSS) { |
| 1805 | /* |
| 1806 | * Writing 5 to /proc/pid/clear_refs resets the peak |
| 1807 | * resident set size to this mm's current rss value. |
| 1808 | */ |
| 1809 | reset_mm_hiwater_rss(mm); |
| 1810 | goto out_unlock; |
| 1811 | } |
| 1812 | |
| 1813 | if (type == CLEAR_REFS_SOFT_DIRTY) { |
| 1814 | for_each_vma(vmi, vma) { |
| 1815 | if (!(vma->vm_flags & VM_SOFTDIRTY)) |
| 1816 | continue; |
| 1817 | vm_flags_clear(vma, VM_SOFTDIRTY); |
| 1818 | vma_set_page_prot(vma); |
| 1819 | } |
| 1820 | |
| 1821 | inc_tlb_flush_pending(mm); |
			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
						0, mm, 0, -1UL);
			mmu_notifier_invalidate_range_start(&range);
		}
		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			mmu_notifier_invalidate_range_end(&range);
| 1829 | flush_tlb_mm(mm); |
| 1830 | dec_tlb_flush_pending(mm); |
| 1831 | } |
| 1832 | out_unlock: |
| 1833 | mmap_write_unlock(mm); |
| 1834 | out_mm: |
| 1835 | mmput(mm); |
| 1836 | } |
	put_task_struct(task);
| 1838 | |
| 1839 | return count; |
| 1840 | } |
| 1841 | |
| 1842 | const struct file_operations proc_clear_refs_operations = { |
| 1843 | .write = clear_refs_write, |
| 1844 | .llseek = noop_llseek, |
| 1845 | }; |
| 1846 | |
| 1847 | typedef struct { |
| 1848 | u64 pme; |
| 1849 | } pagemap_entry_t; |
| 1850 | |
| 1851 | struct pagemapread { |
| 1852 | int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ |
| 1853 | pagemap_entry_t *buffer; |
| 1854 | bool show_pfn; |
| 1855 | }; |
| 1856 | |
| 1857 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
| 1858 | #define PAGEMAP_WALK_MASK (PMD_MASK) |
| 1859 | |
| 1860 | #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) |
| 1861 | #define PM_PFRAME_BITS 55 |
| 1862 | #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) |
| 1863 | #define PM_SOFT_DIRTY BIT_ULL(55) |
| 1864 | #define PM_MMAP_EXCLUSIVE BIT_ULL(56) |
| 1865 | #define PM_UFFD_WP BIT_ULL(57) |
| 1866 | #define PM_GUARD_REGION BIT_ULL(58) |
| 1867 | #define PM_FILE BIT_ULL(61) |
| 1868 | #define PM_SWAP BIT_ULL(62) |
| 1869 | #define PM_PRESENT BIT_ULL(63) |
| 1870 | |
| 1871 | #define PM_END_OF_BUFFER 1 |
| 1872 | |
| 1873 | static inline pagemap_entry_t make_pme(u64 frame, u64 flags) |
| 1874 | { |
| 1875 | return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; |
| 1876 | } |
| 1877 | |
| 1878 | static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm) |
| 1879 | { |
| 1880 | pm->buffer[pm->pos++] = *pme; |
| 1881 | if (pm->pos >= pm->len) |
| 1882 | return PM_END_OF_BUFFER; |
| 1883 | return 0; |
| 1884 | } |
| 1885 | |
| 1886 | static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page) |
| 1887 | { |
| 1888 | if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) |
| 1889 | return folio_precise_page_mapcount(folio, page) == 1; |
| 1890 | return !folio_maybe_mapped_shared(folio); |
| 1891 | } |
| 1892 | |
| 1893 | static int pagemap_pte_hole(unsigned long start, unsigned long end, |
| 1894 | __always_unused int depth, struct mm_walk *walk) |
| 1895 | { |
| 1896 | struct pagemapread *pm = walk->private; |
| 1897 | unsigned long addr = start; |
| 1898 | int err = 0; |
| 1899 | |
| 1900 | while (addr < end) { |
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
| 1903 | /* End of address space hole, which we mark as non-present. */ |
| 1904 | unsigned long hole_end; |
| 1905 | |
| 1906 | if (vma) |
| 1907 | hole_end = min(end, vma->vm_start); |
| 1908 | else |
| 1909 | hole_end = end; |
| 1910 | |
| 1911 | for (; addr < hole_end; addr += PAGE_SIZE) { |
			err = add_to_pagemap(&pme, pm);
| 1913 | if (err) |
| 1914 | goto out; |
| 1915 | } |
| 1916 | |
| 1917 | if (!vma) |
| 1918 | break; |
| 1919 | |
| 1920 | /* Addresses in the VMA. */ |
| 1921 | if (vma->vm_flags & VM_SOFTDIRTY) |
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(&pme, pm);
| 1925 | if (err) |
| 1926 | goto out; |
| 1927 | } |
| 1928 | } |
| 1929 | out: |
| 1930 | return err; |
| 1931 | } |
| 1932 | |
| 1933 | static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, |
| 1934 | struct vm_area_struct *vma, unsigned long addr, pte_t pte) |
| 1935 | { |
| 1936 | u64 frame = 0, flags = 0; |
| 1937 | struct page *page = NULL; |
| 1938 | struct folio *folio; |
| 1939 | |
| 1940 | if (pte_none(pte)) |
| 1941 | goto out; |
| 1942 | |
	if (pte_present(pte)) {
| 1944 | if (pm->show_pfn) |
| 1945 | frame = pte_pfn(pte); |
| 1946 | flags |= PM_PRESENT; |
| 1947 | page = vm_normal_page(vma, addr, pte); |
| 1948 | if (pte_soft_dirty(pte)) |
| 1949 | flags |= PM_SOFT_DIRTY; |
| 1950 | if (pte_uffd_wp(pte)) |
| 1951 | flags |= PM_UFFD_WP; |
| 1952 | } else { |
| 1953 | softleaf_t entry; |
| 1954 | |
| 1955 | if (pte_swp_soft_dirty(pte)) |
| 1956 | flags |= PM_SOFT_DIRTY; |
| 1957 | if (pte_swp_uffd_wp(pte)) |
| 1958 | flags |= PM_UFFD_WP; |
| 1959 | entry = softleaf_from_pte(pte); |
| 1960 | if (pm->show_pfn) { |
| 1961 | pgoff_t offset; |
| 1962 | |
| 1963 | /* |
| 1964 | * For PFN swap offsets, keeping the offset field |
| 1965 | * to be PFN only to be compatible with old smaps. |
| 1966 | */ |
| 1967 | if (softleaf_has_pfn(entry)) |
| 1968 | offset = softleaf_to_pfn(entry); |
| 1969 | else |
| 1970 | offset = swp_offset(entry); |
| 1971 | frame = swp_type(entry) | |
| 1972 | (offset << MAX_SWAPFILES_SHIFT); |
| 1973 | } |
| 1974 | flags |= PM_SWAP; |
| 1975 | if (softleaf_has_pfn(entry)) |
| 1976 | page = softleaf_to_page(entry); |
| 1977 | if (softleaf_is_uffd_wp_marker(entry)) |
| 1978 | flags |= PM_UFFD_WP; |
| 1979 | if (softleaf_is_guard_marker(entry)) |
| 1980 | flags |= PM_GUARD_REGION; |
| 1981 | } |
| 1982 | |
| 1983 | if (page) { |
| 1984 | folio = page_folio(page); |
| 1985 | if (!folio_test_anon(folio)) |
| 1986 | flags |= PM_FILE; |
| 1987 | if ((flags & PM_PRESENT) && |
| 1988 | __folio_page_mapped_exclusively(folio, page)) |
| 1989 | flags |= PM_MMAP_EXCLUSIVE; |
| 1990 | } |
| 1991 | |
| 1992 | out: |
| 1993 | if (vma->vm_flags & VM_SOFTDIRTY) |
| 1994 | flags |= PM_SOFT_DIRTY; |
| 1995 | |
| 1996 | return make_pme(frame, flags); |
| 1997 | } |
| 1998 | |
| 1999 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 2000 | static int pagemap_pmd_range_thp(pmd_t *pmdp, unsigned long addr, |
| 2001 | unsigned long end, struct vm_area_struct *vma, |
| 2002 | struct pagemapread *pm) |
| 2003 | { |
| 2004 | unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT; |
| 2005 | u64 flags = 0, frame = 0; |
| 2006 | pmd_t pmd = *pmdp; |
| 2007 | struct page *page = NULL; |
| 2008 | struct folio *folio = NULL; |
| 2009 | int err = 0; |
| 2010 | |
| 2011 | if (vma->vm_flags & VM_SOFTDIRTY) |
| 2012 | flags |= PM_SOFT_DIRTY; |
| 2013 | |
| 2014 | if (pmd_none(pmd)) |
| 2015 | goto populate_pagemap; |
| 2016 | |
| 2017 | if (pmd_present(pmd)) { |
| 2018 | page = pmd_page(pmd); |
| 2019 | |
| 2020 | flags |= PM_PRESENT; |
| 2021 | if (pmd_soft_dirty(pmd)) |
| 2022 | flags |= PM_SOFT_DIRTY; |
| 2023 | if (pmd_uffd_wp(pmd)) |
| 2024 | flags |= PM_UFFD_WP; |
| 2025 | if (pm->show_pfn) |
| 2026 | frame = pmd_pfn(pmd) + idx; |
| 2027 | } else if (thp_migration_supported()) { |
| 2028 | const softleaf_t entry = softleaf_from_pmd(pmd); |
| 2029 | unsigned long offset; |
| 2030 | |
| 2031 | if (pm->show_pfn) { |
| 2032 | if (softleaf_has_pfn(entry)) |
| 2033 | offset = softleaf_to_pfn(entry) + idx; |
| 2034 | else |
| 2035 | offset = swp_offset(entry) + idx; |
| 2036 | frame = swp_type(entry) | |
| 2037 | (offset << MAX_SWAPFILES_SHIFT); |
| 2038 | } |
| 2039 | flags |= PM_SWAP; |
| 2040 | if (pmd_swp_soft_dirty(pmd)) |
| 2041 | flags |= PM_SOFT_DIRTY; |
| 2042 | if (pmd_swp_uffd_wp(pmd)) |
| 2043 | flags |= PM_UFFD_WP; |
| 2044 | VM_WARN_ON_ONCE(!pmd_is_migration_entry(pmd)); |
| 2045 | page = softleaf_to_page(entry); |
| 2046 | } |
| 2047 | |
| 2048 | if (page) { |
| 2049 | folio = page_folio(page); |
| 2050 | if (!folio_test_anon(folio)) |
| 2051 | flags |= PM_FILE; |
| 2052 | } |
| 2053 | |
| 2054 | populate_pagemap: |
| 2055 | for (; addr != end; addr += PAGE_SIZE, idx++) { |
| 2056 | u64 cur_flags = flags; |
| 2057 | pagemap_entry_t pme; |
| 2058 | |
| 2059 | if (folio && (flags & PM_PRESENT) && |
| 2060 | __folio_page_mapped_exclusively(folio, page)) |
| 2061 | cur_flags |= PM_MMAP_EXCLUSIVE; |
| 2062 | |
		pme = make_pme(frame, cur_flags);
		err = add_to_pagemap(&pme, pm);
| 2065 | if (err) |
| 2066 | break; |
| 2067 | if (pm->show_pfn) { |
| 2068 | if (flags & PM_PRESENT) |
| 2069 | frame++; |
| 2070 | else if (flags & PM_SWAP) |
| 2071 | frame += (1 << MAX_SWAPFILES_SHIFT); |
| 2072 | } |
| 2073 | } |
| 2074 | return err; |
| 2075 | } |
| 2076 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 2077 | |
| 2078 | static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, |
| 2079 | struct mm_walk *walk) |
| 2080 | { |
| 2081 | struct vm_area_struct *vma = walk->vma; |
| 2082 | struct pagemapread *pm = walk->private; |
| 2083 | spinlock_t *ptl; |
| 2084 | pte_t *pte, *orig_pte; |
| 2085 | int err = 0; |
| 2086 | |
| 2087 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
	ptl = pmd_trans_huge_lock(pmdp, vma);
| 2089 | if (ptl) { |
| 2090 | err = pagemap_pmd_range_thp(pmdp, addr, end, vma, pm); |
		spin_unlock(ptl);
| 2092 | return err; |
| 2093 | } |
| 2094 | #endif |
| 2095 | |
| 2096 | /* |
| 2097 | * We can assume that @vma always points to a valid one and @end never |
| 2098 | * goes beyond vma->vm_end. |
| 2099 | */ |
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
| 2101 | if (!pte) { |
| 2102 | walk->action = ACTION_AGAIN; |
| 2103 | return err; |
| 2104 | } |
| 2105 | for (; addr < end; pte++, addr += PAGE_SIZE) { |
| 2106 | pagemap_entry_t pme; |
| 2107 | |
		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
		err = add_to_pagemap(&pme, pm);
| 2110 | if (err) |
| 2111 | break; |
| 2112 | } |
| 2113 | pte_unmap_unlock(orig_pte, ptl); |
| 2114 | |
| 2115 | cond_resched(); |
| 2116 | |
| 2117 | return err; |
| 2118 | } |
| 2119 | |
| 2120 | #ifdef CONFIG_HUGETLB_PAGE |
/* This function walks within one hugetlb entry per call */
| 2122 | static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, |
| 2123 | unsigned long addr, unsigned long end, |
| 2124 | struct mm_walk *walk) |
| 2125 | { |
| 2126 | struct pagemapread *pm = walk->private; |
| 2127 | struct vm_area_struct *vma = walk->vma; |
| 2128 | u64 flags = 0, frame = 0; |
| 2129 | spinlock_t *ptl; |
| 2130 | int err = 0; |
| 2131 | pte_t pte; |
| 2132 | |
| 2133 | if (vma->vm_flags & VM_SOFTDIRTY) |
| 2134 | flags |= PM_SOFT_DIRTY; |
| 2135 | |
	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep);
	pte = huge_ptep_get(walk->mm, addr, ptep);
	if (pte_present(pte)) {
| 2139 | struct folio *folio = page_folio(pte_page(pte)); |
| 2140 | |
| 2141 | if (!folio_test_anon(folio)) |
| 2142 | flags |= PM_FILE; |
| 2143 | |
| 2144 | if (!folio_maybe_mapped_shared(folio) && |
		    !hugetlb_pmd_shared(ptep))
| 2146 | flags |= PM_MMAP_EXCLUSIVE; |
| 2147 | |
| 2148 | if (huge_pte_uffd_wp(pte)) |
| 2149 | flags |= PM_UFFD_WP; |
| 2150 | |
| 2151 | flags |= PM_PRESENT; |
| 2152 | if (pm->show_pfn) |
| 2153 | frame = pte_pfn(pte) + |
| 2154 | ((addr & ~hmask) >> PAGE_SHIFT); |
| 2155 | } else if (pte_swp_uffd_wp_any(pte)) { |
| 2156 | flags |= PM_UFFD_WP; |
| 2157 | } |
| 2158 | |
| 2159 | for (; addr != end; addr += PAGE_SIZE) { |
| 2160 | pagemap_entry_t pme = make_pme(frame, flags); |
| 2161 | |
		err = add_to_pagemap(&pme, pm);
| 2163 | if (err) |
| 2164 | break; |
| 2165 | if (pm->show_pfn && (flags & PM_PRESENT)) |
| 2166 | frame++; |
| 2167 | } |
| 2168 | |
	spin_unlock(ptl);
| 2170 | cond_resched(); |
| 2171 | |
| 2172 | return err; |
| 2173 | } |
| 2174 | #else |
| 2175 | #define pagemap_hugetlb_range NULL |
#endif /* CONFIG_HUGETLB_PAGE */
| 2177 | |
| 2178 | static const struct mm_walk_ops pagemap_ops = { |
| 2179 | .pmd_entry = pagemap_pmd_range, |
| 2180 | .pte_hole = pagemap_pte_hole, |
| 2181 | .hugetlb_entry = pagemap_hugetlb_range, |
| 2182 | .walk_lock = PGWALK_RDLOCK, |
| 2183 | }; |
| 2184 | |
| 2185 | /* |
| 2186 | * /proc/pid/pagemap - an array mapping virtual pages to pfns |
| 2187 | * |
| 2188 | * For each page in the address space, this file contains one 64-bit entry |
| 2189 | * consisting of the following: |
| 2190 | * |
| 2191 | * Bits 0-54 page frame number (PFN) if present |
| 2192 | * Bits 0-4 swap type if swapped |
| 2193 | * Bits 5-54 swap offset if swapped |
| 2194 | * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) |
| 2195 | * Bit 56 page exclusively mapped |
| 2196 | * Bit 57 pte is uffd-wp write-protected |
| 2197 | * Bit 58 pte is a guard region |
| 2198 | * Bits 59-60 zero |
| 2199 | * Bit 61 page is file-page or shared-anon |
| 2200 | * Bit 62 page swapped |
| 2201 | * Bit 63 page present |
| 2202 | * |
| 2203 | * If the page is not present but in swap, then the PFN contains an |
| 2204 | * encoding of the swap file number and the page's offset into the |
| 2205 | * swap. Unmapped pages return a null PFN. This allows determining |
| 2206 | * precisely which pages are mapped (or in swap) and comparing mapped |
| 2207 | * pages between processes. |
| 2208 | * |
| 2209 | * Efficient users of this interface will use /proc/pid/maps to |
| 2210 | * determine which areas of memory are actually mapped and llseek to |
| 2211 | * skip over unmapped regions. |
| 2212 | */ |
| 2213 | static ssize_t pagemap_read(struct file *file, char __user *buf, |
| 2214 | size_t count, loff_t *ppos) |
| 2215 | { |
| 2216 | struct mm_struct *mm = file->private_data; |
| 2217 | struct pagemapread pm; |
| 2218 | unsigned long src; |
| 2219 | unsigned long svpfn; |
| 2220 | unsigned long start_vaddr; |
| 2221 | unsigned long end_vaddr; |
| 2222 | int ret = 0, copied = 0; |
| 2223 | |
| 2224 | if (!mm || !mmget_not_zero(mm)) |
| 2225 | goto out; |
| 2226 | |
| 2227 | ret = -EINVAL; |
| 2228 | /* file position must be aligned */ |
| 2229 | if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) |
| 2230 | goto out_mm; |
| 2231 | |
| 2232 | ret = 0; |
| 2233 | if (!count) |
| 2234 | goto out_mm; |
| 2235 | |
| 2236 | /* do not disclose physical addresses: attack vector */ |
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
| 2238 | |
| 2239 | pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); |
| 2240 | pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL); |
| 2241 | ret = -ENOMEM; |
| 2242 | if (!pm.buffer) |
| 2243 | goto out_mm; |
| 2244 | |
| 2245 | src = *ppos; |
| 2246 | svpfn = src / PM_ENTRY_BYTES; |
| 2247 | end_vaddr = mm->task_size; |
| 2248 | |
| 2249 | /* watch out for wraparound */ |
| 2250 | start_vaddr = end_vaddr; |
| 2251 | if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) { |
| 2252 | unsigned long end; |
| 2253 | |
| 2254 | ret = mmap_read_lock_killable(mm); |
| 2255 | if (ret) |
| 2256 | goto out_free; |
| 2257 | start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT); |
| 2258 | mmap_read_unlock(mm); |
| 2259 | |
| 2260 | end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT); |
| 2261 | if (end >= start_vaddr && end < mm->task_size) |
| 2262 | end_vaddr = end; |
| 2263 | } |
| 2264 | |
| 2265 | /* Ensure the address is inside the task */ |
| 2266 | if (start_vaddr > mm->task_size) |
| 2267 | start_vaddr = end_vaddr; |
| 2268 | |
| 2269 | ret = 0; |
| 2270 | while (count && (start_vaddr < end_vaddr)) { |
| 2271 | int len; |
| 2272 | unsigned long end; |
| 2273 | |
| 2274 | pm.pos = 0; |
| 2275 | end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; |
| 2276 | /* overflow ? */ |
| 2277 | if (end < start_vaddr || end > end_vaddr) |
| 2278 | end = end_vaddr; |
| 2279 | ret = mmap_read_lock_killable(mm); |
| 2280 | if (ret) |
| 2281 | goto out_free; |
		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
| 2283 | mmap_read_unlock(mm); |
| 2284 | start_vaddr = end; |
| 2285 | |
| 2286 | len = min(count, PM_ENTRY_BYTES * pm.pos); |
		if (copy_to_user(buf, pm.buffer, len)) {
| 2288 | ret = -EFAULT; |
| 2289 | goto out_free; |
| 2290 | } |
| 2291 | copied += len; |
| 2292 | buf += len; |
| 2293 | count -= len; |
| 2294 | } |
| 2295 | *ppos += copied; |
| 2296 | if (!ret || ret == PM_END_OF_BUFFER) |
| 2297 | ret = copied; |
| 2298 | |
| 2299 | out_free: |
	kfree(pm.buffer);
| 2301 | out_mm: |
| 2302 | mmput(mm); |
| 2303 | out: |
| 2304 | return ret; |
| 2305 | } |
| 2306 | |
| 2307 | static int pagemap_open(struct inode *inode, struct file *file) |
| 2308 | { |
| 2309 | struct mm_struct *mm; |
| 2310 | |
| 2311 | mm = proc_mem_open(inode, PTRACE_MODE_READ); |
	if (IS_ERR_OR_NULL(mm))
		return mm ? PTR_ERR(mm) : -ESRCH;
| 2314 | file->private_data = mm; |
| 2315 | return 0; |
| 2316 | } |
| 2317 | |
| 2318 | static int pagemap_release(struct inode *inode, struct file *file) |
| 2319 | { |
| 2320 | struct mm_struct *mm = file->private_data; |
| 2321 | |
| 2322 | if (mm) |
| 2323 | mmdrop(mm); |
| 2324 | return 0; |
| 2325 | } |
| 2326 | |
| 2327 | #define PM_SCAN_CATEGORIES (PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN | \ |
| 2328 | PAGE_IS_FILE | PAGE_IS_PRESENT | \ |
| 2329 | PAGE_IS_SWAPPED | PAGE_IS_PFNZERO | \ |
| 2330 | PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY | \ |
| 2331 | PAGE_IS_GUARD) |
| 2332 | #define PM_SCAN_FLAGS (PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC) |
| 2333 | |
| 2334 | struct pagemap_scan_private { |
| 2335 | struct pm_scan_arg arg; |
| 2336 | unsigned long masks_of_interest, cur_vma_category; |
| 2337 | struct page_region *vec_buf; |
| 2338 | unsigned long vec_buf_len, vec_buf_index, found_pages; |
| 2339 | struct page_region __user *vec_out; |
| 2340 | }; |
| 2341 | |
| 2342 | static unsigned long pagemap_page_category(struct pagemap_scan_private *p, |
| 2343 | struct vm_area_struct *vma, |
| 2344 | unsigned long addr, pte_t pte) |
| 2345 | { |
| 2346 | unsigned long categories; |
| 2347 | |
| 2348 | if (pte_none(pte)) |
| 2349 | return 0; |
| 2350 | |
	if (pte_present(pte)) {
| 2352 | struct page *page; |
| 2353 | |
| 2354 | categories = PAGE_IS_PRESENT; |
| 2355 | |
| 2356 | if (!pte_uffd_wp(pte)) |
| 2357 | categories |= PAGE_IS_WRITTEN; |
| 2358 | |
| 2359 | if (p->masks_of_interest & PAGE_IS_FILE) { |
| 2360 | page = vm_normal_page(vma, addr, pte); |
| 2361 | if (page && !PageAnon(page)) |
| 2362 | categories |= PAGE_IS_FILE; |
| 2363 | } |
| 2364 | |
		if (is_zero_pfn(pte_pfn(pte)))
| 2366 | categories |= PAGE_IS_PFNZERO; |
| 2367 | if (pte_soft_dirty(pte)) |
| 2368 | categories |= PAGE_IS_SOFT_DIRTY; |
| 2369 | } else { |
| 2370 | softleaf_t entry; |
| 2371 | |
| 2372 | categories = PAGE_IS_SWAPPED; |
| 2373 | |
| 2374 | if (!pte_swp_uffd_wp_any(pte)) |
| 2375 | categories |= PAGE_IS_WRITTEN; |
| 2376 | |
| 2377 | entry = softleaf_from_pte(pte); |
| 2378 | if (softleaf_is_guard_marker(entry)) |
| 2379 | categories |= PAGE_IS_GUARD; |
		else if ((p->masks_of_interest & PAGE_IS_FILE) &&
			 softleaf_has_pfn(entry) &&
			 !folio_test_anon(softleaf_to_folio(entry)))
| 2383 | categories |= PAGE_IS_FILE; |
| 2384 | |
| 2385 | if (pte_swp_soft_dirty(pte)) |
| 2386 | categories |= PAGE_IS_SOFT_DIRTY; |
| 2387 | } |
| 2388 | |
| 2389 | return categories; |
| 2390 | } |
| 2391 | |
| 2392 | static void make_uffd_wp_pte(struct vm_area_struct *vma, |
| 2393 | unsigned long addr, pte_t *pte, pte_t ptent) |
| 2394 | { |
	if (pte_present(ptent)) {
		pte_t old_pte;

		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_mkuffd_wp(old_pte);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (pte_none(ptent)) {
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
	} else {
		ptent = pte_swp_mkuffd_wp(ptent);
| 2406 | set_pte_at(vma->vm_mm, addr, pte, ptent); |
| 2407 | } |
| 2408 | } |
| 2409 | |
| 2410 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 2411 | static unsigned long pagemap_thp_category(struct pagemap_scan_private *p, |
| 2412 | struct vm_area_struct *vma, |
| 2413 | unsigned long addr, pmd_t pmd) |
| 2414 | { |
| 2415 | unsigned long categories = PAGE_IS_HUGE; |
| 2416 | |
| 2417 | if (pmd_none(pmd)) |
| 2418 | return categories; |
| 2419 | |
| 2420 | if (pmd_present(pmd)) { |
| 2421 | struct page *page; |
| 2422 | |
| 2423 | categories |= PAGE_IS_PRESENT; |
| 2424 | if (!pmd_uffd_wp(pmd)) |
| 2425 | categories |= PAGE_IS_WRITTEN; |
| 2426 | |
| 2427 | if (p->masks_of_interest & PAGE_IS_FILE) { |
| 2428 | page = vm_normal_page_pmd(vma, addr, pmd); |
| 2429 | if (page && !PageAnon(page)) |
| 2430 | categories |= PAGE_IS_FILE; |
| 2431 | } |
| 2432 | |
| 2433 | if (is_huge_zero_pmd(pmd)) |
| 2434 | categories |= PAGE_IS_PFNZERO; |
| 2435 | if (pmd_soft_dirty(pmd)) |
| 2436 | categories |= PAGE_IS_SOFT_DIRTY; |
| 2437 | } else { |
| 2438 | categories |= PAGE_IS_SWAPPED; |
| 2439 | if (!pmd_swp_uffd_wp(pmd)) |
| 2440 | categories |= PAGE_IS_WRITTEN; |
| 2441 | if (pmd_swp_soft_dirty(pmd)) |
| 2442 | categories |= PAGE_IS_SOFT_DIRTY; |
| 2443 | |
| 2444 | if (p->masks_of_interest & PAGE_IS_FILE) { |
| 2445 | const softleaf_t entry = softleaf_from_pmd(pmd); |
| 2446 | |
			if (softleaf_has_pfn(entry) &&
			    !folio_test_anon(softleaf_to_folio(entry)))
| 2449 | categories |= PAGE_IS_FILE; |
| 2450 | } |
| 2451 | } |
| 2452 | |
| 2453 | return categories; |
| 2454 | } |
| 2455 | |
| 2456 | static void make_uffd_wp_pmd(struct vm_area_struct *vma, |
| 2457 | unsigned long addr, pmd_t *pmdp) |
| 2458 | { |
| 2459 | pmd_t old, pmd = *pmdp; |
| 2460 | |
| 2461 | if (pmd_present(pmd)) { |
		old = pmdp_invalidate_ad(vma, addr, pmdp);
		pmd = pmd_mkuffd_wp(old);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (pmd_is_migration_entry(pmd)) {
		pmd = pmd_swp_mkuffd_wp(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
| 2468 | } |
| 2469 | } |
| 2470 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 2471 | |
| 2472 | #ifdef CONFIG_HUGETLB_PAGE |
| 2473 | static unsigned long pagemap_hugetlb_category(pte_t pte) |
| 2474 | { |
| 2475 | unsigned long categories = PAGE_IS_HUGE; |
| 2476 | |
| 2477 | if (pte_none(pte)) |
| 2478 | return categories; |
| 2479 | |
| 2480 | /* |
| 2481 | * According to pagemap_hugetlb_range(), file-backed HugeTLB |
| 2482 | * page cannot be swapped. So PAGE_IS_FILE is not checked for |
| 2483 | * swapped pages. |
| 2484 | */ |
| 2485 | if (pte_present(a: pte)) { |
| 2486 | categories |= PAGE_IS_PRESENT; |
| 2487 | |
| 2488 | if (!huge_pte_uffd_wp(pte)) |
| 2489 | categories |= PAGE_IS_WRITTEN; |
| 2490 | if (!PageAnon(pte_page(pte))) |
| 2491 | categories |= PAGE_IS_FILE; |
		if (is_zero_pfn(pte_pfn(pte)))
| 2493 | categories |= PAGE_IS_PFNZERO; |
| 2494 | if (pte_soft_dirty(pte)) |
| 2495 | categories |= PAGE_IS_SOFT_DIRTY; |
| 2496 | } else { |
| 2497 | categories |= PAGE_IS_SWAPPED; |
| 2498 | |
| 2499 | if (!pte_swp_uffd_wp_any(pte)) |
| 2500 | categories |= PAGE_IS_WRITTEN; |
| 2501 | if (pte_swp_soft_dirty(pte)) |
| 2502 | categories |= PAGE_IS_SOFT_DIRTY; |
| 2503 | } |
| 2504 | |
| 2505 | return categories; |
| 2506 | } |
| 2507 | |
| 2508 | static void make_uffd_wp_huge_pte(struct vm_area_struct *vma, |
| 2509 | unsigned long addr, pte_t *ptep, |
| 2510 | pte_t ptent) |
| 2511 | { |
	const unsigned long psize = huge_page_size(hstate_vma(vma));
	softleaf_t entry;

	if (huge_pte_none(ptent)) {
		set_huge_pte_at(vma->vm_mm, addr, ptep,
				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
		return;
	}

	entry = softleaf_from_pte(ptent);
	if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
		return;

	if (softleaf_is_migration(entry))
		set_huge_pte_at(vma->vm_mm, addr, ptep,
				pte_swp_mkuffd_wp(ptent), psize);
	else
		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
					     huge_pte_mkuffd_wp(ptent));
| 2531 | } |
| 2532 | #endif /* CONFIG_HUGETLB_PAGE */ |
| 2533 | |
| 2534 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) |
| 2535 | static void pagemap_scan_backout_range(struct pagemap_scan_private *p, |
| 2536 | unsigned long addr, unsigned long end) |
| 2537 | { |
| 2538 | struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; |
| 2539 | |
| 2540 | if (!p->vec_buf) |
| 2541 | return; |
| 2542 | |
| 2543 | if (cur_buf->start != addr) |
| 2544 | cur_buf->end = addr; |
| 2545 | else |
| 2546 | cur_buf->start = cur_buf->end = 0; |
| 2547 | |
| 2548 | p->found_pages -= (end - addr) / PAGE_SIZE; |
| 2549 | } |
| 2550 | #endif |
| 2551 | |
| 2552 | static bool pagemap_scan_is_interesting_page(unsigned long categories, |
| 2553 | const struct pagemap_scan_private *p) |
| 2554 | { |
| 2555 | categories ^= p->arg.category_inverted; |
| 2556 | if ((categories & p->arg.category_mask) != p->arg.category_mask) |
| 2557 | return false; |
| 2558 | if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask)) |
| 2559 | return false; |
| 2560 | |
| 2561 | return true; |
| 2562 | } |
| 2563 | |
| 2564 | static bool pagemap_scan_is_interesting_vma(unsigned long categories, |
| 2565 | const struct pagemap_scan_private *p) |
| 2566 | { |
| 2567 | unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED; |
| 2568 | |
| 2569 | categories ^= p->arg.category_inverted; |
| 2570 | if ((categories & required) != required) |
| 2571 | return false; |
| 2572 | |
| 2573 | return true; |
| 2574 | } |
| 2575 | |
| 2576 | static int pagemap_scan_test_walk(unsigned long start, unsigned long end, |
| 2577 | struct mm_walk *walk) |
| 2578 | { |
| 2579 | struct pagemap_scan_private *p = walk->private; |
| 2580 | struct vm_area_struct *vma = walk->vma; |
| 2581 | unsigned long vma_category = 0; |
| 2582 | bool wp_allowed = userfaultfd_wp_async(vma) && |
| 2583 | userfaultfd_wp_use_markers(vma); |
| 2584 | |
| 2585 | if (!wp_allowed) { |
| 2586 | /* User requested explicit failure over wp-async capability */ |
| 2587 | if (p->arg.flags & PM_SCAN_CHECK_WPASYNC) |
| 2588 | return -EPERM; |
| 2589 | /* |
| 2590 | * User requires wr-protect, and allows silently skipping |
| 2591 | * unsupported vmas. |
| 2592 | */ |
| 2593 | if (p->arg.flags & PM_SCAN_WP_MATCHING) |
| 2594 | return 1; |
| 2595 | /* |
| 2596 | * Then the request doesn't involve wr-protects at all, |
| 2597 | * fall through to the rest checks, and allow vma walk. |
| 2598 | */ |
| 2599 | } |
| 2600 | |
| 2601 | if (vma->vm_flags & VM_PFNMAP) |
| 2602 | return 1; |
| 2603 | |
| 2604 | if (wp_allowed) |
| 2605 | vma_category |= PAGE_IS_WPALLOWED; |
| 2606 | |
| 2607 | if (vma->vm_flags & VM_SOFTDIRTY) |
| 2608 | vma_category |= PAGE_IS_SOFT_DIRTY; |
| 2609 | |
	if (!pagemap_scan_is_interesting_vma(vma_category, p))
| 2611 | return 1; |
| 2612 | |
| 2613 | p->cur_vma_category = vma_category; |
| 2614 | |
| 2615 | return 0; |
| 2616 | } |
| 2617 | |
| 2618 | static bool pagemap_scan_push_range(unsigned long categories, |
| 2619 | struct pagemap_scan_private *p, |
| 2620 | unsigned long addr, unsigned long end) |
| 2621 | { |
| 2622 | struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index]; |
| 2623 | |
| 2624 | /* |
| 2625 | * When there is no output buffer provided at all, the sentinel values |
| 2626 | * won't match here. There is no other way for `cur_buf->end` to be |
| 2627 | * non-zero other than it being non-empty. |
| 2628 | */ |
| 2629 | if (addr == cur_buf->end && categories == cur_buf->categories) { |
| 2630 | cur_buf->end = end; |
| 2631 | return true; |
| 2632 | } |
| 2633 | |
| 2634 | if (cur_buf->end) { |
| 2635 | if (p->vec_buf_index >= p->vec_buf_len - 1) |
| 2636 | return false; |
| 2637 | |
| 2638 | cur_buf = &p->vec_buf[++p->vec_buf_index]; |
| 2639 | } |
| 2640 | |
| 2641 | cur_buf->start = addr; |
| 2642 | cur_buf->end = end; |
| 2643 | cur_buf->categories = categories; |
| 2644 | |
| 2645 | return true; |
| 2646 | } |
| 2647 | |
| 2648 | static int pagemap_scan_output(unsigned long categories, |
| 2649 | struct pagemap_scan_private *p, |
| 2650 | unsigned long addr, unsigned long *end) |
| 2651 | { |
| 2652 | unsigned long n_pages, total_pages; |
| 2653 | int ret = 0; |
| 2654 | |
| 2655 | if (!p->vec_buf) |
| 2656 | return 0; |
| 2657 | |
| 2658 | categories &= p->arg.return_mask; |
| 2659 | |
| 2660 | n_pages = (*end - addr) / PAGE_SIZE; |
| 2661 | if (check_add_overflow(p->found_pages, n_pages, &total_pages) || |
| 2662 | total_pages > p->arg.max_pages) { |
| 2663 | size_t n_too_much = total_pages - p->arg.max_pages; |
| 2664 | *end -= n_too_much * PAGE_SIZE; |
| 2665 | n_pages -= n_too_much; |
| 2666 | ret = -ENOSPC; |
| 2667 | } |
| 2668 | |
	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
| 2670 | *end = addr; |
| 2671 | n_pages = 0; |
| 2672 | ret = -ENOSPC; |
| 2673 | } |
| 2674 | |
| 2675 | p->found_pages += n_pages; |
| 2676 | if (ret) |
| 2677 | p->arg.walk_end = *end; |
| 2678 | |
| 2679 | return ret; |
| 2680 | } |
| 2681 | |
| 2682 | static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start, |
| 2683 | unsigned long end, struct mm_walk *walk) |
| 2684 | { |
| 2685 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 2686 | struct pagemap_scan_private *p = walk->private; |
| 2687 | struct vm_area_struct *vma = walk->vma; |
| 2688 | unsigned long categories; |
| 2689 | spinlock_t *ptl; |
| 2690 | int ret = 0; |
| 2691 | |
| 2692 | ptl = pmd_trans_huge_lock(pmd, vma); |
| 2693 | if (!ptl) |
| 2694 | return -ENOENT; |
| 2695 | |
| 2696 | categories = p->cur_vma_category | |
		     pagemap_thp_category(p, vma, start, *pmd);
| 2698 | |
| 2699 | if (!pagemap_scan_is_interesting_page(categories, p)) |
| 2700 | goto out_unlock; |
| 2701 | |
	ret = pagemap_scan_output(categories, p, start, &end);
| 2703 | if (start == end) |
| 2704 | goto out_unlock; |
| 2705 | |
| 2706 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) |
| 2707 | goto out_unlock; |
| 2708 | if (~categories & PAGE_IS_WRITTEN) |
| 2709 | goto out_unlock; |
| 2710 | |
| 2711 | /* |
| 2712 | * Break huge page into small pages if the WP operation |
| 2713 | * needs to be performed on a portion of the huge page. |
| 2714 | */ |
| 2715 | if (end != start + HPAGE_SIZE) { |
		spin_unlock(ptl);
		split_huge_pmd(vma, pmd, start);
		pagemap_scan_backout_range(p, start, end);
		/* Report as if there was no THP */
		return -ENOENT;
	}

	make_uffd_wp_pmd(vma, start, pmd);
	flush_tlb_range(vma, start, end);
out_unlock:
	spin_unlock(ptl);
| 2727 | return ret; |
| 2728 | #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ |
| 2729 | return -ENOENT; |
| 2730 | #endif |
| 2731 | } |
| 2732 | |
| 2733 | static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, |
| 2734 | unsigned long end, struct mm_walk *walk) |
| 2735 | { |
| 2736 | struct pagemap_scan_private *p = walk->private; |
| 2737 | struct vm_area_struct *vma = walk->vma; |
| 2738 | unsigned long addr, flush_end = 0; |
| 2739 | pte_t *pte, *start_pte; |
| 2740 | spinlock_t *ptl; |
| 2741 | int ret; |
| 2742 | |
| 2743 | ret = pagemap_scan_thp_entry(pmd, start, end, walk); |
| 2744 | if (ret != -ENOENT) |
| 2745 | return ret; |
| 2746 | |
| 2747 | ret = 0; |
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
| 2749 | if (!pte) { |
| 2750 | walk->action = ACTION_AGAIN; |
| 2751 | return 0; |
| 2752 | } |
| 2753 | |
| 2754 | arch_enter_lazy_mmu_mode(); |
| 2755 | |
| 2756 | if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { |
| 2757 | /* Fast path for performing exclusive WP */ |
| 2758 | for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { |
			pte_t ptent = ptep_get(pte);

			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
			    pte_swp_uffd_wp_any(ptent))
| 2763 | continue; |
| 2764 | make_uffd_wp_pte(vma, addr, pte, ptent); |
| 2765 | if (!flush_end) |
| 2766 | start = addr; |
| 2767 | flush_end = addr + PAGE_SIZE; |
| 2768 | } |
| 2769 | goto flush_and_return; |
| 2770 | } |
| 2771 | |
| 2772 | if (!p->arg.category_anyof_mask && !p->arg.category_inverted && |
| 2773 | p->arg.category_mask == PAGE_IS_WRITTEN && |
| 2774 | p->arg.return_mask == PAGE_IS_WRITTEN) { |
| 2775 | for (addr = start; addr < end; pte++, addr += PAGE_SIZE) { |
| 2776 | unsigned long next = addr + PAGE_SIZE; |
			pte_t ptent = ptep_get(pte);

			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
			    pte_swp_uffd_wp_any(ptent))
				continue;
			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
						  p, addr, &next);
| 2784 | if (next == addr) |
| 2785 | break; |
| 2786 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) |
| 2787 | continue; |
| 2788 | make_uffd_wp_pte(vma, addr, pte, ptent); |
| 2789 | if (!flush_end) |
| 2790 | start = addr; |
| 2791 | flush_end = next; |
| 2792 | } |
| 2793 | goto flush_and_return; |
| 2794 | } |
| 2795 | |
| 2796 | for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { |
		pte_t ptent = ptep_get(pte);
		unsigned long categories = p->cur_vma_category |
					   pagemap_page_category(p, vma, addr, ptent);
| 2800 | unsigned long next = addr + PAGE_SIZE; |
| 2801 | |
| 2802 | if (!pagemap_scan_is_interesting_page(categories, p)) |
| 2803 | continue; |
| 2804 | |
		ret = pagemap_scan_output(categories, p, addr, &next);
| 2806 | if (next == addr) |
| 2807 | break; |
| 2808 | |
| 2809 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) |
| 2810 | continue; |
| 2811 | if (~categories & PAGE_IS_WRITTEN) |
| 2812 | continue; |
| 2813 | |
| 2814 | make_uffd_wp_pte(vma, addr, pte, ptent); |
| 2815 | if (!flush_end) |
| 2816 | start = addr; |
| 2817 | flush_end = next; |
| 2818 | } |
| 2819 | |
| 2820 | flush_and_return: |
| 2821 | if (flush_end) |
| 2822 | flush_tlb_range(vma, start, addr); |
| 2823 | |
| 2824 | arch_leave_lazy_mmu_mode(); |
| 2825 | pte_unmap_unlock(start_pte, ptl); |
| 2826 | |
| 2827 | cond_resched(); |
| 2828 | return ret; |
| 2829 | } |
| 2830 | |
| 2831 | #ifdef CONFIG_HUGETLB_PAGE |
| 2832 | static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask, |
| 2833 | unsigned long start, unsigned long end, |
| 2834 | struct mm_walk *walk) |
| 2835 | { |
| 2836 | struct pagemap_scan_private *p = walk->private; |
| 2837 | struct vm_area_struct *vma = walk->vma; |
| 2838 | unsigned long categories; |
| 2839 | spinlock_t *ptl; |
| 2840 | int ret = 0; |
| 2841 | pte_t pte; |
| 2842 | |
| 2843 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) { |
| 2844 | /* Go the short route when not write-protecting pages. */ |
| 2845 | |
		pte = huge_ptep_get(walk->mm, start, ptep);
| 2847 | categories = p->cur_vma_category | pagemap_hugetlb_category(pte); |
| 2848 | |
| 2849 | if (!pagemap_scan_is_interesting_page(categories, p)) |
| 2850 | return 0; |
| 2851 | |
		return pagemap_scan_output(categories, p, start, &end);
| 2853 | } |
| 2854 | |
	i_mmap_lock_write(vma->vm_file->f_mapping);
	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);

	pte = huge_ptep_get(walk->mm, start, ptep);
| 2859 | categories = p->cur_vma_category | pagemap_hugetlb_category(pte); |
| 2860 | |
| 2861 | if (!pagemap_scan_is_interesting_page(categories, p)) |
| 2862 | goto out_unlock; |
| 2863 | |
	ret = pagemap_scan_output(categories, p, start, &end);
| 2865 | if (start == end) |
| 2866 | goto out_unlock; |
| 2867 | |
| 2868 | if (~categories & PAGE_IS_WRITTEN) |
| 2869 | goto out_unlock; |
| 2870 | |
| 2871 | if (end != start + HPAGE_SIZE) { |
| 2872 | /* Partial HugeTLB page WP isn't possible. */ |
		pagemap_scan_backout_range(p, start, end);
| 2874 | p->arg.walk_end = start; |
| 2875 | ret = 0; |
| 2876 | goto out_unlock; |
| 2877 | } |
| 2878 | |
	make_uffd_wp_huge_pte(vma, start, ptep, pte);
| 2880 | flush_hugetlb_tlb_range(vma, start, end); |
| 2881 | |
| 2882 | out_unlock: |
	spin_unlock(ptl);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
| 2885 | |
| 2886 | return ret; |
| 2887 | } |
| 2888 | #else |
| 2889 | #define pagemap_scan_hugetlb_entry NULL |
| 2890 | #endif |
| 2891 | |
| 2892 | static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end, |
| 2893 | int depth, struct mm_walk *walk) |
| 2894 | { |
| 2895 | struct pagemap_scan_private *p = walk->private; |
| 2896 | struct vm_area_struct *vma = walk->vma; |
| 2897 | int ret, err; |
| 2898 | |
	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
| 2900 | return 0; |
| 2901 | |
	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
| 2903 | if (addr == end) |
| 2904 | return ret; |
| 2905 | |
| 2906 | if (~p->arg.flags & PM_SCAN_WP_MATCHING) |
| 2907 | return ret; |
| 2908 | |
	err = uffd_wp_range(vma, addr, end - addr, true);
| 2910 | if (err < 0) |
| 2911 | ret = err; |
| 2912 | |
| 2913 | return ret; |
| 2914 | } |
| 2915 | |
| 2916 | static const struct mm_walk_ops pagemap_scan_ops = { |
| 2917 | .test_walk = pagemap_scan_test_walk, |
| 2918 | .pmd_entry = pagemap_scan_pmd_entry, |
| 2919 | .pte_hole = pagemap_scan_pte_hole, |
| 2920 | .hugetlb_entry = pagemap_scan_hugetlb_entry, |
| 2921 | }; |
| 2922 | |
| 2923 | static int pagemap_scan_get_args(struct pm_scan_arg *arg, |
| 2924 | unsigned long uarg) |
| 2925 | { |
	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
| 2927 | return -EFAULT; |
| 2928 | |
| 2929 | if (arg->size != sizeof(struct pm_scan_arg)) |
| 2930 | return -EINVAL; |
| 2931 | |
| 2932 | /* Validate requested features */ |
| 2933 | if (arg->flags & ~PM_SCAN_FLAGS) |
| 2934 | return -EINVAL; |
| 2935 | if ((arg->category_inverted | arg->category_mask | |
| 2936 | arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES) |
| 2937 | return -EINVAL; |
| 2938 | |
| 2939 | arg->start = untagged_addr((unsigned long)arg->start); |
| 2940 | arg->end = untagged_addr((unsigned long)arg->end); |
| 2941 | arg->vec = untagged_addr((unsigned long)arg->vec); |
| 2942 | |
| 2943 | /* Validate memory pointers */ |
| 2944 | if (!IS_ALIGNED(arg->start, PAGE_SIZE)) |
| 2945 | return -EINVAL; |
| 2946 | if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start)) |
| 2947 | return -EFAULT; |
| 2948 | if (!arg->vec && arg->vec_len) |
| 2949 | return -EINVAL; |
| 2950 | if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX) |
| 2951 | return -EINVAL; |
| 2952 | if (arg->vec && !access_ok((void __user *)(long)arg->vec, |
| 2953 | size_mul(arg->vec_len, sizeof(struct page_region)))) |
| 2954 | return -EFAULT; |
| 2955 | |
| 2956 | /* Fixup default values */ |
| 2957 | arg->end = ALIGN(arg->end, PAGE_SIZE); |
| 2958 | arg->walk_end = 0; |
| 2959 | if (!arg->max_pages) |
| 2960 | arg->max_pages = ULONG_MAX; |
| 2961 | |
| 2962 | return 0; |
| 2963 | } |
| 2964 | |
| 2965 | static int pagemap_scan_writeback_args(struct pm_scan_arg *arg, |
| 2966 | unsigned long uargl) |
| 2967 | { |
| 2968 | struct pm_scan_arg __user *uarg = (void __user *)uargl; |
| 2969 | |
	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
| 2971 | return -EFAULT; |
| 2972 | |
| 2973 | return 0; |
| 2974 | } |
| 2975 | |
| 2976 | static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p) |
| 2977 | { |
| 2978 | if (!p->arg.vec_len) |
| 2979 | return 0; |
| 2980 | |
| 2981 | p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT, |
| 2982 | p->arg.vec_len); |
| 2983 | p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf), |
| 2984 | GFP_KERNEL); |
| 2985 | if (!p->vec_buf) |
| 2986 | return -ENOMEM; |
| 2987 | |
| 2988 | p->vec_buf->start = p->vec_buf->end = 0; |
| 2989 | p->vec_out = (struct page_region __user *)(long)p->arg.vec; |
| 2990 | |
| 2991 | return 0; |
| 2992 | } |
| 2993 | |
| 2994 | static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p) |
| 2995 | { |
| 2996 | const struct page_region *buf = p->vec_buf; |
| 2997 | long n = p->vec_buf_index; |
| 2998 | |
| 2999 | if (!p->vec_buf) |
| 3000 | return 0; |
| 3001 | |
| 3002 | if (buf[n].end != buf[n].start) |
| 3003 | n++; |
| 3004 | |
| 3005 | if (!n) |
| 3006 | return 0; |
| 3007 | |
	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
| 3009 | return -EFAULT; |
| 3010 | |
| 3011 | p->arg.vec_len -= n; |
| 3012 | p->vec_out += n; |
| 3013 | |
| 3014 | p->vec_buf_index = 0; |
| 3015 | p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len); |
| 3016 | p->vec_buf->start = p->vec_buf->end = 0; |
| 3017 | |
| 3018 | return n; |
| 3019 | } |
| 3020 | |
| 3021 | static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg) |
| 3022 | { |
| 3023 | struct pagemap_scan_private p = {0}; |
| 3024 | unsigned long walk_start; |
| 3025 | size_t n_ranges_out = 0; |
| 3026 | int ret; |
| 3027 | |
	ret = pagemap_scan_get_args(&p.arg, uarg);
| 3029 | if (ret) |
| 3030 | return ret; |
| 3031 | |
| 3032 | p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask | |
| 3033 | p.arg.return_mask; |
	ret = pagemap_scan_init_bounce_buffer(&p);
| 3035 | if (ret) |
| 3036 | return ret; |
| 3037 | |
| 3038 | for (walk_start = p.arg.start; walk_start < p.arg.end; |
| 3039 | walk_start = p.arg.walk_end) { |
| 3040 | struct mmu_notifier_range range; |
| 3041 | long n_out; |
| 3042 | |
| 3043 | if (fatal_signal_pending(current)) { |
| 3044 | ret = -EINTR; |
| 3045 | break; |
| 3046 | } |
| 3047 | |
| 3048 | ret = mmap_read_lock_killable(mm); |
| 3049 | if (ret) |
| 3050 | break; |
| 3051 | |
| 3052 | /* Protection change for the range is going to happen. */ |
| 3053 | if (p.arg.flags & PM_SCAN_WP_MATCHING) { |
			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
						mm, walk_start, p.arg.end);
			mmu_notifier_invalidate_range_start(&range);
| 3057 | } |
| 3058 | |
		ret = walk_page_range(mm, walk_start, p.arg.end,
				      &pagemap_scan_ops, &p);
| 3061 | |
| 3062 | if (p.arg.flags & PM_SCAN_WP_MATCHING) |
			mmu_notifier_invalidate_range_end(&range);
| 3064 | |
| 3065 | mmap_read_unlock(mm); |
| 3066 | |
| 3067 | n_out = pagemap_scan_flush_buffer(p: &p); |
| 3068 | if (n_out < 0) |
| 3069 | ret = n_out; |
| 3070 | else |
| 3071 | n_ranges_out += n_out; |
| 3072 | |
| 3073 | if (ret != -ENOSPC) |
| 3074 | break; |
| 3075 | |
| 3076 | if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages) |
| 3077 | break; |
| 3078 | } |
| 3079 | |
| 3080 | /* ENOSPC signifies early stop (buffer full) from the walk. */ |
| 3081 | if (!ret || ret == -ENOSPC) |
| 3082 | ret = n_ranges_out; |
| 3083 | |
| 3084 | /* The walk_end isn't set when ret is zero */ |
| 3085 | if (!p.arg.walk_end) |
| 3086 | p.arg.walk_end = p.arg.end; |
	if (pagemap_scan_writeback_args(&p.arg, uarg))
| 3088 | ret = -EFAULT; |
| 3089 | |
	kfree(p.vec_buf);
| 3091 | return ret; |
| 3092 | } |
| 3093 | |
| 3094 | static long do_pagemap_cmd(struct file *file, unsigned int cmd, |
| 3095 | unsigned long arg) |
| 3096 | { |
| 3097 | struct mm_struct *mm = file->private_data; |
| 3098 | |
| 3099 | switch (cmd) { |
| 3100 | case PAGEMAP_SCAN: |
		return do_pagemap_scan(mm, arg);
| 3102 | |
| 3103 | default: |
| 3104 | return -EINVAL; |
| 3105 | } |
| 3106 | } |
| 3107 | |
| 3108 | const struct file_operations proc_pagemap_operations = { |
| 3109 | .llseek = mem_lseek, /* borrow this */ |
| 3110 | .read = pagemap_read, |
| 3111 | .open = pagemap_open, |
| 3112 | .release = pagemap_release, |
| 3113 | .unlocked_ioctl = do_pagemap_cmd, |
| 3114 | .compat_ioctl = do_pagemap_cmd, |
| 3115 | }; |
| 3116 | #endif /* CONFIG_PROC_PAGE_MONITOR */ |
| 3117 | |
| 3118 | #ifdef CONFIG_NUMA |
| 3119 | |
| 3120 | struct numa_maps { |
| 3121 | unsigned long pages; |
| 3122 | unsigned long anon; |
| 3123 | unsigned long active; |
| 3124 | unsigned long writeback; |
| 3125 | unsigned long mapcount_max; |
| 3126 | unsigned long dirty; |
| 3127 | unsigned long swapcache; |
| 3128 | unsigned long node[MAX_NUMNODES]; |
| 3129 | }; |
| 3130 | |
| 3131 | struct numa_maps_private { |
| 3132 | struct proc_maps_private proc_maps; |
| 3133 | struct numa_maps md; |
| 3134 | }; |
| 3135 | |
| 3136 | static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, |
| 3137 | unsigned long nr_pages) |
| 3138 | { |
| 3139 | struct folio *folio = page_folio(page); |
| 3140 | int count; |
| 3141 | |
| 3142 | if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) |
| 3143 | count = folio_precise_page_mapcount(folio, page); |
| 3144 | else |
| 3145 | count = folio_average_page_mapcount(folio); |
| 3146 | |
| 3147 | md->pages += nr_pages; |
| 3148 | if (pte_dirty || folio_test_dirty(folio)) |
| 3149 | md->dirty += nr_pages; |
| 3150 | |
| 3151 | if (folio_test_swapcache(folio)) |
| 3152 | md->swapcache += nr_pages; |
| 3153 | |
| 3154 | if (folio_test_active(folio) || folio_test_unevictable(folio)) |
| 3155 | md->active += nr_pages; |
| 3156 | |
| 3157 | if (folio_test_writeback(folio)) |
| 3158 | md->writeback += nr_pages; |
| 3159 | |
| 3160 | if (folio_test_anon(folio)) |
| 3161 | md->anon += nr_pages; |
| 3162 | |
| 3163 | if (count > md->mapcount_max) |
| 3164 | md->mapcount_max = count; |
| 3165 | |
| 3166 | md->node[folio_nid(folio)] += nr_pages; |
| 3167 | } |
| 3168 | |
| 3169 | static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, |
| 3170 | unsigned long addr) |
| 3171 | { |
| 3172 | struct page *page; |
| 3173 | int nid; |
| 3174 | |
	if (!pte_present(pte))
| 3176 | return NULL; |
| 3177 | |
| 3178 | page = vm_normal_page(vma, addr, pte); |
| 3179 | if (!page || is_zone_device_page(page)) |
| 3180 | return NULL; |
| 3181 | |
| 3182 | if (PageReserved(page)) |
| 3183 | return NULL; |
| 3184 | |
| 3185 | nid = page_to_nid(page); |
| 3186 | if (!node_isset(nid, node_states[N_MEMORY])) |
| 3187 | return NULL; |
| 3188 | |
| 3189 | return page; |
| 3190 | } |
| 3191 | |
| 3192 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 3193 | static struct page *can_gather_numa_stats_pmd(pmd_t pmd, |
| 3194 | struct vm_area_struct *vma, |
| 3195 | unsigned long addr) |
| 3196 | { |
| 3197 | struct page *page; |
| 3198 | int nid; |
| 3199 | |
| 3200 | if (!pmd_present(pmd)) |
| 3201 | return NULL; |
| 3202 | |
| 3203 | page = vm_normal_page_pmd(vma, addr, pmd); |
| 3204 | if (!page) |
| 3205 | return NULL; |
| 3206 | |
| 3207 | if (PageReserved(page)) |
| 3208 | return NULL; |
| 3209 | |
| 3210 | nid = page_to_nid(page); |
| 3211 | if (!node_isset(nid, node_states[N_MEMORY])) |
| 3212 | return NULL; |
| 3213 | |
| 3214 | return page; |
| 3215 | } |
| 3216 | #endif |
| 3217 | |
| 3218 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, |
| 3219 | unsigned long end, struct mm_walk *walk) |
| 3220 | { |
| 3221 | struct numa_maps *md = walk->private; |
| 3222 | struct vm_area_struct *vma = walk->vma; |
| 3223 | spinlock_t *ptl; |
| 3224 | pte_t *orig_pte; |
| 3225 | pte_t *pte; |
| 3226 | |
| 3227 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 3228 | ptl = pmd_trans_huge_lock(pmd, vma); |
| 3229 | if (ptl) { |
| 3230 | struct page *page; |
| 3231 | |
		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
| 3237 | return 0; |
| 3238 | } |
| 3239 | #endif |
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
| 3241 | if (!pte) { |
| 3242 | walk->action = ACTION_AGAIN; |
| 3243 | return 0; |
| 3244 | } |
| 3245 | do { |
		pte_t ptent = ptep_get(pte);
		struct page *page = can_gather_numa_stats(ptent, vma, addr);

		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(ptent), 1);
| 3251 | |
| 3252 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 3253 | pte_unmap_unlock(orig_pte, ptl); |
| 3254 | cond_resched(); |
| 3255 | return 0; |
| 3256 | } |
| 3257 | #ifdef CONFIG_HUGETLB_PAGE |
| 3258 | static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, |
| 3259 | unsigned long addr, unsigned long end, struct mm_walk *walk) |
| 3260 | { |
| 3261 | pte_t huge_pte; |
| 3262 | struct numa_maps *md; |
| 3263 | struct page *page; |
| 3264 | spinlock_t *ptl; |
| 3265 | |
	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	huge_pte = huge_ptep_get(walk->mm, addr, pte);
	if (!pte_present(huge_pte))
| 3269 | goto out; |
| 3270 | |
| 3271 | page = pte_page(huge_pte); |
| 3272 | |
| 3273 | md = walk->private; |
	gather_stats(page, md, pte_dirty(huge_pte), 1);
out:
	spin_unlock(ptl);
| 3277 | return 0; |
| 3278 | } |
| 3279 | |
| 3280 | #else |
| 3281 | static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, |
| 3282 | unsigned long addr, unsigned long end, struct mm_walk *walk) |
| 3283 | { |
| 3284 | return 0; |
| 3285 | } |
| 3286 | #endif |
| 3287 | |
| 3288 | static const struct mm_walk_ops show_numa_ops = { |
| 3289 | .hugetlb_entry = gather_hugetlb_stats, |
| 3290 | .pmd_entry = gather_pte_stats, |
| 3291 | .walk_lock = PGWALK_RDLOCK, |
| 3292 | }; |
| 3293 | |
| 3294 | /* |
| 3295 | * Display pages allocated per node and memory policy via /proc. |
| 3296 | */ |
| 3297 | static int show_numa_map(struct seq_file *m, void *v) |
| 3298 | { |
| 3299 | struct numa_maps_private *numa_priv = m->private; |
| 3300 | struct proc_maps_private *proc_priv = &numa_priv->proc_maps; |
| 3301 | struct vm_area_struct *vma = v; |
| 3302 | struct numa_maps *md = &numa_priv->md; |
| 3303 | struct file *file = vma->vm_file; |
| 3304 | struct mm_struct *mm = vma->vm_mm; |
| 3305 | char buffer[64]; |
| 3306 | struct mempolicy *pol; |
| 3307 | pgoff_t ilx; |
| 3308 | int nid; |
| 3309 | |
| 3310 | if (!mm) |
| 3311 | return 0; |
| 3312 | |
| 3313 | /* Ensure we start with an empty set of numa_maps statistics. */ |
| 3314 | memset(md, 0, sizeof(*md)); |
| 3315 | |
	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
| 3325 | |
	if (file) {
		seq_puts(m, " file=");
		seq_path(m, file_user_path(file), "\n\t= ");
	} else if (vma_is_initial_heap(vma)) {
		seq_puts(m, " heap");
	} else if (vma_is_initial_stack(vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");
| 3337 | |
| 3338 | /* mmap_lock is held by m_start */ |
	walk_page_vma(vma, &show_numa_ops, md);
| 3340 | |
| 3341 | if (!md->pages) |
| 3342 | goto out; |
| 3343 | |
	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
| 3372 | return 0; |
| 3373 | } |
| 3374 | |
| 3375 | static const struct seq_operations proc_pid_numa_maps_op = { |
| 3376 | .start = m_start, |
| 3377 | .next = m_next, |
| 3378 | .stop = m_stop, |
| 3379 | .show = show_numa_map, |
| 3380 | }; |
| 3381 | |
| 3382 | static int pid_numa_maps_open(struct inode *inode, struct file *file) |
| 3383 | { |
	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
			sizeof(struct numa_maps_private));
| 3386 | } |
| 3387 | |
| 3388 | const struct file_operations proc_pid_numa_maps_operations = { |
| 3389 | .open = pid_numa_maps_open, |
| 3390 | .read = seq_read, |
| 3391 | .llseek = seq_lseek, |
| 3392 | .release = proc_map_release, |
| 3393 | }; |
| 3394 | |
| 3395 | #endif /* CONFIG_NUMA */ |
| 3396 | |