/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_LRU_CACHE_H
#define BTRFS_LRU_CACHE_H

#include <linux/types.h>
#include <linux/maple_tree.h>
#include <linux/list.h>
/*
 * A cache entry. This is meant to be embedded in a structure of a user of
 * this module. Similar to how struct list_head and struct rb_node are used.
 *
 * Note: it should be embedded as the first element in a struct (offset 0), and
 * this module assumes it was allocated with kmalloc(), so it calls kfree() when
 * it needs to free an entry.
 */
struct btrfs_lru_cache_entry {
	/* Links the entry into the cache's LRU list (btrfs_lru_cache::lru_list). */
	struct list_head lru_list;
	/* Key identifying the entry (e.g. an inode number); always 64 bits. */
	u64 key;
	/*
	 * Optional generation associated to a key. Use 0 if not needed/used.
	 * Entries with the same key and different generations are stored in a
	 * linked list, so use this only for cases where there's a small number
	 * of different generations.
	 */
	u64 gen;
	/*
	 * The maple tree uses unsigned long type for the keys, which is 32 bits
	 * on 32 bits systems, and 64 bits on 64 bits systems. So if we want to
	 * use something like inode numbers as keys, which are always a u64, we
	 * have to deal with this in a special way - we store the key in the
	 * entry itself, as a u64, and the values inserted into the maple tree
	 * are linked lists of entries - so in case we are on a 64 bits system,
	 * that list always has a single entry, while on 32 bits systems it
	 * may have more than one, with each entry having the same value for
	 * their lower 32 bits of the u64 key.
	 */
	struct list_head list;
};
| 41 | |
/*
 * A cache with a bounded number of entries: entries are indexed by key in a
 * maple tree for lookups, and linked in a list that tracks usage order.
 */
struct btrfs_lru_cache {
	/*
	 * List linking all cached entries through their 'lru_list' member.
	 * NOTE(review): presumably kept in least-to-most recently used order
	 * (the head is what btrfs_lru_cache_lru_entry() returns) — confirm
	 * against lru-cache.c.
	 */
	struct list_head lru_list;
	/*
	 * Maple tree mapping keys to lists of entries (the lists deal with
	 * u64 keys on 32 bits systems, see struct btrfs_lru_cache_entry).
	 */
	struct maple_tree entries;
	/* Number of entries stored in the cache. */
	unsigned int size;
	/* Maximum number of entries the cache can have. */
	unsigned int max_size;
};
| 50 | |
/*
 * Iterate over all entries in the cache, from the tail of the LRU list
 * towards its head, in a way that is safe against removal (and freeing) of
 * the current entry.
 *
 * @cache: struct btrfs_lru_cache pointer to iterate over.
 * @entry: struct btrfs_lru_cache_entry pointer used as the loop cursor.
 * @tmp:   second cursor of the same type, holds the next entry so that
 *         @entry may be removed inside the loop body.
 */
#define btrfs_lru_cache_for_each_entry_safe(cache, entry, tmp)		\
	list_for_each_entry_safe_reverse((entry), (tmp), &(cache)->lru_list, lru_list)
| 53 | |
| 54 | static inline struct btrfs_lru_cache_entry *btrfs_lru_cache_lru_entry( |
| 55 | struct btrfs_lru_cache *cache) |
| 56 | { |
| 57 | return list_first_entry_or_null(&cache->lru_list, |
| 58 | struct btrfs_lru_cache_entry, lru_list); |
| 59 | } |
| 60 | |
/* Initialize @cache, limiting it to at most @max_size entries. */
void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size);
/* Find the entry matching @key and @gen, or NULL if it's not in the cache. */
struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
						     u64 key, u64 gen);
/*
 * Add @new_entry (kmalloc'ed by the caller, see struct btrfs_lru_cache_entry)
 * to the cache, using @gfp for any internal allocations. Returns 0 on success
 * or a negative value on failure — see lru-cache.c for the exact error and
 * eviction semantics, which are not visible from this header.
 */
int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
			  struct btrfs_lru_cache_entry *new_entry,
			  gfp_t gfp);
/*
 * Remove @entry from @cache. NOTE(review): the module comment above says
 * entries are freed with kfree() by this module — confirm in lru-cache.c
 * whether removal also frees @entry.
 */
void btrfs_lru_cache_remove(struct btrfs_lru_cache *cache,
			    struct btrfs_lru_cache_entry *entry);
/* Remove (and free) all entries, leaving @cache empty but still usable. */
void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache);
| 70 | |
#endif