// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

/* Set if no cache information is found in DT/ACPI. */
static bool use_arch_info;

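/* Return the cacheinfo descriptor (levels, leaves, info_list) for @cpu. */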
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
	    use_arch_info)
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

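/*
 * A CPU's last-level cache leaf is usable for sharing checks once it
 * carries either a firmware-assigned cache ID or a DT/ACPI fw_token.
 */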
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

static bool of_check_cache_nodes(struct device_node *np);

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

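/*
 * Unified caches use the plain "cache-*" properties (index 0); instruction
 * and data caches map directly onto their enum cache_type values.
 */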
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

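/*
 * Derive ways = (size / nr_sets) / line_size when all three properties are
 * known and the cache is not fully associative (nr_sets == 1). For example,
 * a 32KiB cache with 64 sets and 64-byte lines gives (32768 / 64) / 64 = 8
 * ways.
 */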
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static bool match_cache_node(struct device_node *cpu,
			     const struct device_node *cache_node)
{
	struct device_node *prev, *cache = of_find_next_cache_node(cpu);

	while (cache) {
		if (cache == cache_node) {
			of_node_put(cache);
			return true;
		}

		prev = cache;
		cache = of_find_next_cache_node(cache);
		of_node_put(prev);
	}

	return false;
}

#ifndef arch_compact_of_hwid
#define arch_compact_of_hwid(_x)	(_x)
#endif

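/*
 * Use the smallest CPU hardware ID among the CPUs sharing this cache node
 * as the cache ID. Hardware IDs wider than 32 bits cannot be encoded in
 * cacheinfo::id, so leave CACHE_ID unset in that case.
 */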
static void cache_of_set_id(struct cacheinfo *this_leaf,
			    struct device_node *cache_node)
{
	struct device_node *cpu;
	u32 min_id = ~0;

	for_each_of_cpu_node(cpu) {
		u64 id = of_get_cpu_hwid(cpu, 0);

		id = arch_compact_of_hwid(id);
		if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
			of_node_put(cpu);
			return;
		}

		if (match_cache_node(cpu, cache_node))
			min_id = min(min_id, id);
	}

	if (min_id != ~0) {
		this_leaf->id = min_id;
		this_leaf->attributes |= CACHE_ID;
	}
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must set up the cache level correctly,
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified.
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
	cache_of_set_id(this_leaf, np);
}

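/*
 * Walk the CPU's chain of cache nodes and fill in one cacheinfo leaf per
 * node. Level 1 leaves take their properties from the CPU node itself;
 * deeper levels advance to the next cache node in the chain.
 */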
static int cache_setup_of_node(unsigned int cpu)
{
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	if (!of_check_cache_nodes(np))
		return -ENOENT;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			struct device_node *prev __free(device_node) = np;

			np = of_find_next_cache_node(np);
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static bool of_check_cache_nodes(struct device_node *np)
{
	if (of_property_present(np, "cache-size")   ||
	    of_property_present(np, "i-cache-size") ||
	    of_property_present(np, "d-cache-size") ||
	    of_property_present(np, "cache-unified"))
		return true;

	struct device_node *next __free(device_node) = of_find_next_cache_node(np);
	if (next)
		return true;

	return false;
}

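/*
 * Count how many cache leaves a node describes: one per "*-cache-size"
 * property present, otherwise fall back on "cache-unified" to decide
 * between one unified leaf and a split instruction/data pair.
 */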
static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_present(np, "cache-size"))
		++leaves;
	if (of_property_present(np, "i-cache-size"))
		++leaves;
	if (of_property_present(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/* The '[i-|d-|]cache-size' property is required, but
		 * if absent, fall back on the 'cache-unified' property.
		 */
		if (of_property_read_bool(np, "cache-unified"))
			return 1;
		else
			return 2;
	}

	return leaves;
}

int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
	unsigned int levels = 0, leaves, level;

	if (!of_check_cache_nodes(np))
		return -ENOENT;

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	while (1) {
		struct device_node *prev __free(device_node) = np;

		np = of_find_next_cache_node(np);
		if (!np)
			break;

		if (!of_device_is_compatible(np, "cache"))
			return -EINVAL;
		if (of_property_read_u32(np, "cache-level", &level))
			return -EINVAL;
		if (level <= levels)
			return -EINVAL;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

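/*
 * Fill the cache leaves from DT when a devicetree is populated, otherwise
 * from ACPI (PPTT). If neither yields cache information, set use_arch_info
 * so cache sharing is inferred from the architecturally reported levels.
 */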
static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	// Assume there is no cache information available in DT/ACPI from now on.
	if (ret && use_arch_cache_info())
		use_arch_info = true;

	return ret;
}

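/*
 * Populate shared_cpu_map for every leaf of @cpu by comparing it against the
 * matching leaves (same level and type) of all other online CPUs, and track
 * the largest coherency line size seen so far in coherency_max_size.
 */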
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * skip setting up cache properties if LLC is valid, just need
	 * to update the shared cpu_map if the cache attributes were
	 * populated early before all the cpus are brought online
	 */
	if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			if (i == cpu || !per_cpu_cacheinfo(i))
				continue; /* skip if itself or no cacheinfo */
			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	/* shared_cpu_map is now populated for the cpu */
	this_cpu_ci->cpu_map_populated = true;
	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			if (sibling == cpu || !per_cpu_cacheinfo(sibling))
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}

	/* cpu is no longer populated in the shared map */
	this_cpu_ci->cpu_map_populated = false;
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

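/* Allocate the info_list array, one struct cacheinfo per detected leaf. */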
static inline int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

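/*
 * Determine the number of cache levels and leaves early (before the CPU is
 * brought up), from DT or from the ACPI PPTT, and allocate the info_list.
 * If firmware provides nothing, fall back to the arch's early_cache_level().
 */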
int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come in pairs
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/*
	 * Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	/*
	 * Some architectures (e.g., x86) do not use early initialization.
	 * Allocate memory now in such case.
	 */
	if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * If the LLC is valid, the cache leaves were already populated, so
	 * just update the shared cpu map.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		/*
		 * populate_cache_leaves() may completely set up the cache
		 * leaves and shared_cpu_map, or it may leave them partially
		 * set up.
		 */
		ret = populate_cache_leaves(cpu);
		if (ret)
			goto free_ci;
	}

	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

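/*
 * Return the number of CPUs sharing @cpu's last-level data/unified cache and
 * point *map at the corresponding shared_cpu_map. When @cpu is going offline
 * its own map may already be cleared, so borrow a sibling's map instead.
 */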
static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
					 cpumask_t **map)
{
	struct cacheinfo *llc, *sib_llc;
	unsigned int sibling;

	if (!last_level_cache_is_valid(cpu))
		return 0;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
		return 0;

	if (online) {
		*map = &llc->shared_cpu_map;
		return cpumask_weight(*map);
	}

	/* shared_cpu_map of offlined CPU will be cleared, so use sibling map */
	for_each_cpu(sibling, &llc->shared_cpu_map) {
		if (sibling == cpu || !last_level_cache_is_valid(sibling))
			continue;
		sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
		*map = &sib_llc->shared_cpu_map;
		return cpumask_weight(*map);
	}

	return 0;
}

/*
 * Calculate the size of the per-CPU data cache slice. This can be
 * used to estimate the size of the data cache slice that can be used
 * by one CPU under ideal circumstances. UNIFIED caches are counted
 * in addition to DATA caches. So, please consider code cache usage
 * when using the result.
 *
 * Because the cache inclusive/non-inclusive information isn't
 * available, we just use the size of the per-CPU slice of LLC to make
 * the result more predictable across architectures.
 */
static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
{
	struct cpu_cacheinfo *ci;
	struct cacheinfo *llc;
	unsigned int nr_shared;

	if (!last_level_cache_is_valid(cpu))
		return;

	ci = ci_cacheinfo(cpu);
	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
		return;

	nr_shared = cpumask_weight(&llc->shared_cpu_map);
	if (nr_shared)
		ci->per_cpu_data_slice_size = llc->size / nr_shared;
}

static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
					   cpumask_t *cpu_map)
{
	unsigned int icpu;

	for_each_cpu(icpu, cpu_map) {
		if (!cpu_online && icpu == cpu)
			continue;
		update_per_cpu_data_slice_size_cpu(icpu);
		setup_pcp_cacheinfo(icpu);
	}
}

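/*
 * CPU hotplug online callback: detect the cache attributes, create the
 * cpuX/cache sysfs hierarchy and refresh the per-CPU data slice size of
 * every CPU sharing the last-level cache.
 */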
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);
	cpumask_t *cpu_map;

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		goto err;
	if (cpu_map_shared_cache(true, cpu, &cpu_map))
		update_per_cpu_data_slice_size(true, cpu, cpu_map);
	return 0;
err:
	free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	cpumask_t *cpu_map;
	unsigned int nr_shared;

	nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	if (nr_shared > 1)
		update_per_cpu_data_slice_size(false, cpu, cpu_map);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);