// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

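		/*
		 * Count the size as shared if the mm has more than one
		 * reference or the mapping is a nommu shared mapping;
		 * otherwise it is private, and any unused tail of the
		 * backing region beyond the VMA is reported as slack.
		 */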
		if (atomic_read(&mm->mm_count) > 1 ||
		    is_nommu_shared_mapping(vma->vm_flags)) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

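	/*
	 * The mm, fs, files and sighand structures themselves are charged
	 * as shared when another task also holds a reference to them,
	 * otherwise as private.
	 */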
	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	mmap_read_unlock(mm);

	seq_printf(m,
		   "Mem:\t%8lu bytes\n"
		   "Slack:\t%8lu bytes\n"
		   "Shared:\t%8lu bytes\n",
		   bytes, slack, sbytes);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long vsize = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		vsize += vma->vm_end - vma->vm_start;
	mmap_read_unlock(mm);
	return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	struct vm_region *region;
	unsigned long size = kobjsize(mm);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

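	/*
	 * Text covers start_code..end_code; "data" approximates data plus
	 * stack as start_data..start_stack. Both ranges are rounded out to
	 * whole pages before being converted to page counts.
	 */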
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	mmap_read_unlock(mm);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

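	/*
	 * For file-backed mappings, report the backing device, inode and
	 * the file offset in bytes (vm_pgoff is stored in page units).
	 */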
	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

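	/*
	 * Fix the line width so that any file name or "[stack]" annotation
	 * starts in a consistent column, as in the MMU /proc/<pid>/maps
	 * output.
	 */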
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_path(m, file_user_path(file), "");
	} else if (mm && vma_is_initial_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	return nommu_vma_show(m, _p);
}

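/*
 * Fetch the next VMA from the iterator and record its start address in
 * *ppos so that a subsequent read can resume from the same place; -1UL
 * in *ppos marks the end of the walk.
 */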
static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
					   loff_t *ppos)
{
	struct vm_area_struct *vma = vma_next(&priv->iter);

	if (vma) {
		*ppos = vma->vm_start;
	} else {
		*ppos = -1UL;
	}

	return vma;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;

	/* See proc_get_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->lock_ctx.mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	vma_iter_init(&priv->iter, mm, last_addr);

	return proc_get_vma(priv, ppos);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->lock_ctx.mm;

	if (!priv->task)
		return;

	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
{
	return proc_get_vma(m->private, ppos);
}

static const struct seq_operations proc_pid_maps_ops = {
	.start = m_start,
	.next = m_next,
	.stop = m_stop,
	.show = show_map
};

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->lock_ctx.mm = proc_mem_open(inode, PTRACE_MODE_READ);
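	/*
	 * proc_mem_open() returns NULL if the task has no mm (e.g. it has
	 * already exited) and an ERR_PTR on failure; turn both into an
	 * error here.
	 */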
	if (IS_ERR_OR_NULL(priv->lock_ctx.mm)) {
		int err = priv->lock_ctx.mm ? PTR_ERR(priv->lock_ctx.mm) : -ESRCH;

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}


static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

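	/*
	 * The mm reference taken at open time is an mm_count reference
	 * (proc_mem_open() does not pin the address space), so release it
	 * with mmdrop(); the mmget()/mmput() pair is handled per read in
	 * m_start()/m_stop().
	 */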
	if (priv->lock_ctx.mm)
		mmdrop(priv->lock_ctx.mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};