// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

/* open-coded version */
struct bpf_iter_kmem_cache {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

struct bpf_iter_kmem_cache_kern {
	struct kmem_cache *pos;
} __attribute__((aligned(8)));

#define KMEM_CACHE_POS_START	((void *)1L)

__bpf_kfunc_start_defs();

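/**
 * bpf_iter_kmem_cache_new() - Initialize an open-coded kmem_cache iterator
 * @it: opaque iterator state provided by the BPF program
 *
 * Set the position to the KMEM_CACHE_POS_START sentinel so that the first
 * call to bpf_iter_kmem_cache_next() returns the first entry of slab_caches.
 *
 * From a BPF program this is normally driven through the bpf_for_each()
 * convenience macro (bpf_experimental.h in selftests), roughly (a sketch,
 * not part of this file):
 *
 *	struct kmem_cache *s;
 *
 *	bpf_for_each(kmem_cache, s)
 *		...;
 *
 * Return: always 0.
 */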
__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));

	kit->pos = KMEM_CACHE_POS_START;
	return 0;
}

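/**
 * bpf_iter_kmem_cache_next() - Advance the iterator to the next kmem_cache
 * @it: iterator state initialized by bpf_iter_kmem_cache_new()
 *
 * Under slab_mutex, find the entry following the current position in
 * slab_caches, take a reference on it (unless it is a boot cache) and
 * release the reference held on the previous entry.  If ours was the last
 * reference to the previous cache, destroy it with kmem_cache_destroy()
 * after dropping the lock.
 *
 * Return: the next kmem_cache, or NULL when the list is exhausted.
 */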
__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *prev = kit->pos;
	struct kmem_cache *next;
	bool destroy = false;

	if (!prev)
		return NULL;

	mutex_lock(&slab_mutex);

	if (list_empty(&slab_caches)) {
		mutex_unlock(&slab_mutex);
		return NULL;
	}

	if (prev == KMEM_CACHE_POS_START)
		next = list_first_entry(&slab_caches, struct kmem_cache, list);
	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
		next = NULL;
	else
		next = list_next_entry(prev, list);

	/* boot_caches have negative refcount, don't touch them */
	if (next && next->refcount > 0)
		next->refcount++;

	/* Skip kmem_cache_destroy() for active entries */
	if (prev && prev != KMEM_CACHE_POS_START) {
		if (prev->refcount > 1)
			prev->refcount--;
		else if (prev->refcount == 1)
			destroy = true;
	}

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(prev);

	kit->pos = next;
	return next;
}

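/**
 * bpf_iter_kmem_cache_destroy() - Release the iterator state
 * @it: iterator state
 *
 * Drop the reference held on the current position, if any, destroying the
 * cache when that was the last reference.  A no-op when the iteration has
 * not started or has already run to completion.
 */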
__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *s = kit->pos;
	bool destroy = false;

	if (s == NULL || s == KMEM_CACHE_POS_START)
		return;

	mutex_lock(&slab_mutex);

	/* Skip kmem_cache_destroy() for active entries */
	if (s->refcount > 1)
		s->refcount--;
	else if (s->refcount == 1)
		destroy = true;

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(s);
}

__bpf_kfunc_end_defs();

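/*
 * Context passed to the BPF program attached to the "kmem_cache" iterator
 * target.  @s is NULL on the final invocation after the last cache has
 * been visited.
 */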
struct bpf_iter__kmem_cache {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kmem_cache *, s);
};

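/*
 * The seq_file private data overlays the open-coded iterator state so the
 * seq_file callbacks below can reuse the kfuncs defined above.
 */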
union kmem_cache_iter_priv {
	struct bpf_iter_kmem_cache it;
	struct bpf_iter_kmem_cache_kern kit;
};

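/*
 * Find the cache at *pos, pin it so it stays on the list while slab_mutex
 * is dropped between reads, and seed the open-coded iterator state that
 * ->next and ->stop reuse.
 */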
static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t cnt = 0;
	bool found = false;
	struct kmem_cache *s;
	union kmem_cache_iter_priv *p = seq->private;

	mutex_lock(&slab_mutex);

	/* Find an entry at the given position in the slab_caches list instead
	 * of keeping a reference (of the last visited entry, if any) out of
	 * slab_mutex. It might miss something if one is deleted in the middle
	 * while it releases the lock. But it should be rare and there's not
	 * much we can do about it.
	 */
	list_for_each_entry(s, &slab_caches, list) {
		if (cnt == *pos) {
			/* Make sure this entry remains in the list by getting
			 * a new reference count. Note that boot_cache entries
			 * have a negative refcount, so don't touch them.
			 */
			if (s->refcount > 0)
				s->refcount++;
			found = true;
			break;
		}
		cnt++;
	}
	mutex_unlock(&slab_mutex);

	if (!found)
		s = NULL;

	p->kit.pos = s;
	return s;
}

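/*
 * Run the program one last time with a NULL cache when there is no current
 * entry (the walk reached the end of the list), then release the reference
 * held on the current position.
 */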
static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	union kmem_cache_iter_priv *p = seq->private;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, true);
	if (prog && !ctx.s)
		bpf_iter_run_prog(prog, &ctx);

	bpf_iter_kmem_cache_destroy(&p->it);
}

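/* Advance the seq_file position by reusing the open-coded next kfunc */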
static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union kmem_cache_iter_priv *p = seq->private;

	++*pos;

	return bpf_iter_kmem_cache_next(&p->it);
}

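/* Invoke the attached BPF program for the current kmem_cache */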
static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}

static const struct seq_operations kmem_cache_iter_seq_ops = {
	.start = kmem_cache_iter_seq_start,
	.next = kmem_cache_iter_seq_next,
	.stop = kmem_cache_iter_seq_stop,
	.show = kmem_cache_iter_seq_show,
};

BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
	.seq_ops = &kmem_cache_iter_seq_ops,
	.seq_priv_size = sizeof(union kmem_cache_iter_priv),
};

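/* Shown in the fdinfo of an attached kmem_cache iterator */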
| 210 | |
| 211 | static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux, |
| 212 | struct seq_file *seq) |
| 213 | { |
| 214 | seq_puts(m: seq, s: "kmem_cache iter\n" ); |
| 215 | } |
| 216 | |
| 217 | DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta, |
| 218 | struct kmem_cache *s) |
| 219 | |
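/*
 * Registration info for the "kmem_cache" iterator target.  The context's
 * @s pointer is a trusted BTF pointer that may be NULL on the final call.
 */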
static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
	.target = "kmem_cache",
	.feature = BPF_ITER_RESCHED,
	.show_fdinfo = bpf_iter_kmem_cache_show_fdinfo,
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__kmem_cache, s),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info = &kmem_cache_iter_seq_info,
};

static int __init bpf_kmem_cache_iter_init(void)
{
	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
}

late_initcall(bpf_kmem_cache_iter_init);