  1. /*
  2. * Copyright (C) Nelson Integration, LLC 2016
  3. * Author: Eric Nelson<eric@nelint.com>
  4. *
  5. * SPDX-License-Identifier: GPL-2.0+
  6. *
  7. */
  8. #include <config.h>
  9. #include <common.h>
  10. #include <malloc.h>
  11. #include <part.h>
  12. #include <linux/ctype.h>
  13. #include <linux/list.h>
/*
 * One cached span of consecutive blocks read from a single block device.
 * Nodes live on the MRU-ordered block_cache list.
 */
struct block_cache_node {
	struct list_head lh;	/* list linkage; code casts list heads to
				 * nodes, so this must stay the first member */
	int iftype;		/* interface type of the owning device */
	int devnum;		/* device number within that interface */
	lbaint_t start;		/* first block number held in @cache */
	lbaint_t blkcnt;	/* number of blocks held in @cache */
	unsigned long blksz;	/* device block size in bytes */
	char *cache;		/* malloc'd buffer of blkcnt * blksz bytes */
};
/* Cache entries in MRU order: most recently used node at the head. */
static LIST_HEAD(block_cache);

/*
 * Counters and limits for the cache; struct block_cache_stats is declared
 * in a header outside this file (presumably the public blk header).
 */
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 2,
	.max_entries = 32
};
  28. static struct block_cache_node *cache_find(int iftype, int devnum,
  29. lbaint_t start, lbaint_t blkcnt,
  30. unsigned long blksz)
  31. {
  32. struct block_cache_node *node;
  33. list_for_each_entry(node, &block_cache, lh)
  34. if ((node->iftype == iftype) &&
  35. (node->devnum == devnum) &&
  36. (node->blksz == blksz) &&
  37. (node->start <= start) &&
  38. (node->start + node->blkcnt >= start + blkcnt)) {
  39. if (block_cache.next != &node->lh) {
  40. /* maintain MRU ordering */
  41. list_del(&node->lh);
  42. list_add(&node->lh, &block_cache);
  43. }
  44. return node;
  45. }
  46. return 0;
  47. }
  48. int blkcache_read(int iftype, int devnum,
  49. lbaint_t start, lbaint_t blkcnt,
  50. unsigned long blksz, void *buffer)
  51. {
  52. struct block_cache_node *node = cache_find(iftype, devnum, start,
  53. blkcnt, blksz);
  54. if (node) {
  55. const char *src = node->cache + (start - node->start) * blksz;
  56. memcpy(buffer, src, blksz * blkcnt);
  57. debug("hit: start " LBAF ", count " LBAFU "\n",
  58. start, blkcnt);
  59. ++_stats.hits;
  60. return 1;
  61. }
  62. debug("miss: start " LBAF ", count " LBAFU "\n",
  63. start, blkcnt);
  64. ++_stats.misses;
  65. return 0;
  66. }
  67. void blkcache_fill(int iftype, int devnum,
  68. lbaint_t start, lbaint_t blkcnt,
  69. unsigned long blksz, void const *buffer)
  70. {
  71. lbaint_t bytes;
  72. struct block_cache_node *node;
  73. /* don't cache big stuff */
  74. if (blkcnt > _stats.max_blocks_per_entry)
  75. return;
  76. if (_stats.max_entries == 0)
  77. return;
  78. bytes = blksz * blkcnt;
  79. if (_stats.max_entries <= _stats.entries) {
  80. /* pop LRU */
  81. node = (struct block_cache_node *)block_cache.prev;
  82. list_del(&node->lh);
  83. _stats.entries--;
  84. debug("drop: start " LBAF ", count " LBAFU "\n",
  85. node->start, node->blkcnt);
  86. if (node->blkcnt * node->blksz < bytes) {
  87. free(node->cache);
  88. node->cache = 0;
  89. }
  90. } else {
  91. node = malloc(sizeof(*node));
  92. if (!node)
  93. return;
  94. node->cache = 0;
  95. }
  96. if (!node->cache) {
  97. node->cache = malloc(bytes);
  98. if (!node->cache) {
  99. free(node);
  100. return;
  101. }
  102. }
  103. debug("fill: start " LBAF ", count " LBAFU "\n",
  104. start, blkcnt);
  105. node->iftype = iftype;
  106. node->devnum = devnum;
  107. node->start = start;
  108. node->blkcnt = blkcnt;
  109. node->blksz = blksz;
  110. memcpy(node->cache, buffer, bytes);
  111. list_add(&node->lh, &block_cache);
  112. _stats.entries++;
  113. }
  114. void blkcache_invalidate(int iftype, int devnum)
  115. {
  116. struct list_head *entry, *n;
  117. struct block_cache_node *node;
  118. list_for_each_safe(entry, n, &block_cache) {
  119. node = (struct block_cache_node *)entry;
  120. if ((node->iftype == iftype) &&
  121. (node->devnum == devnum)) {
  122. list_del(entry);
  123. free(node->cache);
  124. free(node);
  125. --_stats.entries;
  126. }
  127. }
  128. }
  129. void blkcache_configure(unsigned blocks, unsigned entries)
  130. {
  131. struct block_cache_node *node;
  132. if ((blocks != _stats.max_blocks_per_entry) ||
  133. (entries != _stats.max_entries)) {
  134. /* invalidate cache */
  135. while (!list_empty(&block_cache)) {
  136. node = (struct block_cache_node *)block_cache.next;
  137. list_del(&node->lh);
  138. free(node->cache);
  139. free(node);
  140. }
  141. _stats.entries = 0;
  142. }
  143. _stats.max_blocks_per_entry = blocks;
  144. _stats.max_entries = entries;
  145. _stats.hits = 0;
  146. _stats.misses = 0;
  147. }
  148. void blkcache_stats(struct block_cache_stats *stats)
  149. {
  150. memcpy(stats, &_stats, sizeof(*stats));
  151. _stats.hits = 0;
  152. _stats.misses = 0;
  153. }