mbcache.c

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, but
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use a hash of the block contents as the key and the block number as the
 * value. That's why keys need not be unique (different xattr blocks may end up
 * having the same hash). However, a block number always uniquely identifies a
 * cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */

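/*
 * Typical call sequence (an illustrative sketch with assumed names such as
 * "hash" and "blocknr"; this is not code that exists in the kernel):
 *
 *	cache = mb_cache_create(10);
 *	...
 *	mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
 *	...
 *	entry = mb_cache_entry_find_first(cache, hash);
 *	if (entry) {
 *		... compare block contents and possibly reuse the block ...
 *		mb_cache_entry_put(cache, entry);
 *	}
 *	...
 *	mb_cache_entry_delete_block(cache, hash, blocknr);
 *	...
 *	mb_cache_destroy(cache);
 */
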
struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	int			c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @block - block that contains data
 * @reusable - is the block reusable by other inodes?
 *
 * Creates entry in @cache with key @key and records that data is stored in
 * block @block. The function returns -EBUSY if an entry with the same key
 * and for the same block already exists in the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  sector_t block, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for the hash list */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_block = block;
	entry->e_reusable = reusable;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_block == block) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

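/*
 * Illustrative helper (an assumption for this sketch, not used anywhere in
 * the kernel): remember a freshly written xattr block. -EBUSY only means the
 * same (key, block) pair is already cached, which is harmless here.
 */
static int __maybe_unused example_remember_xattr_block(struct mb_cache *cache,
						       u32 hash,
						       sector_t blocknr)
{
	int err;

	err = mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
	if (err == -EBUSY)
		err = 0;	/* Already cached - nothing to do */
	return err;
}
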
void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first entry in cache with given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next entry in cache with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of entry races
 * with the search), finds the first reusable entry in the hash chain. The
 * function drops the reference to @entry and returns with a reference to the
 * found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

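/*
 * Illustrative helper (assumed names, not real kernel code): walk all
 * reusable entries with key @hash and let a caller-supplied predicate decide
 * whether the referenced block really matches. mb_cache_entry_find_next()
 * drops the reference to its argument, so only the entry we stop at needs an
 * explicit put. Returning 0 to mean "no match" is an assumption of this
 * sketch.
 */
static sector_t __maybe_unused
example_find_reusable_block(struct mb_cache *cache, u32 hash,
			    bool (*block_matches)(sector_t block))
{
	struct mb_cache_entry *entry;
	sector_t blocknr = 0;

	entry = mb_cache_entry_find_first(cache, hash);
	while (entry) {
		if (block_matches(entry->e_block)) {
			/* Hit: give the entry a better chance to survive */
			mb_cache_entry_touch(cache, entry);
			blocknr = entry->e_block;
			mb_cache_entry_put(cache, entry);
			break;
		}
		entry = mb_cache_entry_find_next(cache, entry);
	}
	return blocknr;
}
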
/*
 * mb_cache_entry_get - get a cache entry by block number (and key)
 * @cache - cache we work with
 * @key - key of block number @block
 * @block - block number
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

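/*
 * Illustrative helper (assumed name): callers can use mb_cache_entry_get()
 * to find the entry for one particular block, e.g. to mark the block
 * reusable again once its on-disk refcount permits sharing.
 */
static void __maybe_unused example_mark_block_reusable(struct mb_cache *cache,
						       u32 hash,
						       sector_t blocknr)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_get(cache, hash, blocknr);
	if (!entry)
		return;
	entry->e_reusable = 1;
	mb_cache_entry_put(cache, entry);
}
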
/* mb_cache_entry_delete_block - remove information about block from cache
 * @cache - cache we work with
 * @key - key of block @block
 * @block - block number
 *
 * Remove entry from cache @cache with key @key and with data stored in @block.
 */
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
				 sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);

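/*
 * Illustrative helper (assumed name): when the contents of an xattr block
 * change, the stale (old hash, block) pair must be dropped so lookups cannot
 * return a block that no longer matches, and the block is re-inserted under
 * its new content hash.
 */
static int __maybe_unused example_rehash_xattr_block(struct mb_cache *cache,
						     u32 old_hash, u32 new_hash,
						     sector_t blocknr)
{
	mb_cache_entry_delete_block(cache, old_hash, blocknr);
	return mb_cache_entry_create(cache, GFP_NOFS, new_hash, blocknr, true);
}
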
/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it a higher chance of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned int shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return mb_cache_shrink(cache, nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

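/*
 * Worked example of the reclaim thresholds (the bucket_bits value is just an
 * example): with mb_cache_create(10) there are 1024 buckets and
 * c_max_entries = 1024 << 4 = 16384. Once c_entry_count reaches 16384, each
 * entry creation schedules this worker, which reclaims 16384 / 16 = 1024
 * entries; if creations still outpace it and the count hits 2 * 16384, the
 * creating task additionally reclaims SYNC_SHRINK_BATCH (64) entries
 * synchronously in mb_cache_entry_create().
 */
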
/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	int bucket_count = 1 << bucket_bits;
	int i;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	module_put(THIS_MODULE);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb_cache_destroy);

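/*
 * Illustrative helpers (assumed names): a filesystem typically creates one
 * cache when it sets up its xattr support and destroys it on the way out,
 * which is how ext2 and ext4 use this code for their xattr block caches.
 */
static struct mb_cache * __maybe_unused example_fs_create_xattr_cache(void)
{
	/* 6 bits = 64 hash buckets, so c_max_entries = 64 << 4 = 1024 */
	return mb_cache_create(6);
}

static void __maybe_unused example_fs_destroy_xattr_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}
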
static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	BUG_ON(!mb_entry_cache);
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");