/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

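/*
 * Accessor used both by the sysfs code below and by architecture code
 * implementing the weak hooks further down; note the returned info_list
 * is only valid once detect_cache_attributes() has run for @cpu.
 */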
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
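/*
 * cache_setup_of_node() binds each cache leaf to its device tree node:
 * level 1 leaves map to the CPU node itself, while deeper levels are
 * found by following the DT's next-level-cache links via
 * of_find_next_cache_node(). The node references taken here are the
 * basis for the shared-leaf comparison below and are dropped again in
 * cache_shared_cpu_map_remove().
 */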
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

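/*
 * With DT, two leaves on different CPUs describe the same physical
 * cache exactly when they resolved to the same device_node above.
 */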
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 caches and
	 * system-wide shared caches for all other levels. This will be
	 * used only if arch specific code has not populated
	 * shared_cpu_map already.
	 */
	return !(this_leaf->level == 1);
}
#endif

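/*
 * cache_shared_cpu_map_setup() computes, for every leaf of @cpu, the
 * mask of online CPUs sharing that cache. The relation is kept
 * symmetric: whenever a sibling's leaf matches, each CPU is added to
 * the other's shared_cpu_map.
 */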
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret;

	ret = cache_setup_of_node(cpu);
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}
	return 0;
}

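/*
 * Undo of the above for a departing CPU: remove it from every
 * sibling's shared_cpu_map (and vice versa), then drop the DT node
 * references taken in cache_setup_of_node().
 */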
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;
			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;
			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

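/*
 * init_cache_level() and populate_cache_leaves() are the two hooks an
 * architecture must override (they are declared __weak below) before
 * this driver can report anything: the first sets num_levels and
 * num_leaves in the per-cpu cpu_cacheinfo, the second fills in one
 * struct cacheinfo per leaf. A minimal sketch of an override, for a
 * hypothetical CPU with split L1 caches and a unified L2 (the helper
 * and all values are illustrative, not from any real platform):
 *
 *	static void ci_leaf_init(struct cacheinfo *this_leaf,
 *				 enum cache_type type, unsigned int level)
 *	{
 *		this_leaf->level = level;
 *		this_leaf->type = type;
 *	}
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;
 *		this_cpu_ci->num_leaves = 3;	(L1I + L1D + L2)
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *		struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 *
 *		ci_leaf_init(this_leaf++, CACHE_TYPE_INST, 1);
 *		ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, 1);
 *		ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, 2);
 *		return 0;
 *	}
 *
 * Fields left at zero (size, ways, ...) are simply hidden from sysfs
 * by the is_visible() callback further down.
 */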
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

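/*
 * detect_cache_attributes() drives the two arch hooks: size the
 * info_list from init_cache_level(), let populate_cache_leaves() fill
 * it in, then derive of_node and shared_cpu_map for whatever the
 * architecture left unpopulated.
 */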
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
			cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

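/*
 * show_one() stamps out a sysfs show() callback per scalar field: for
 * example, show_one(level, level) below expands to a level_show() that
 * prints this_leaf->level, which DEVICE_ATTR_RO(level) further down
 * then picks up by name.
 */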
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

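/*
 * Attributes are registered unconditionally but filtered at creation
 * time: the is_visible() callback below hides any file whose backing
 * field was never populated (zero level, empty shared_cpu_map, and so
 * on), so userspace only sees values the architecture actually
 * provided. ways_of_associativity is special-cased on size because 0
 * ways is a legal value meaning fully associative.
 */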
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

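/*
 * cache_get_priv_group() lets an architecture append one extra
 * attribute group of its own per leaf (the second slot in
 * cache_private_groups[] above is reserved for it); the default weak
 * implementation opts out by returning NULL.
 */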
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

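/*
 * cache_add_dev() creates one "indexN" child device per cache leaf
 * under the "cache" device set up above, giving the familiar
 * /sys/devices/system/cpu/cpuX/cache/indexY/ hierarchy. Leaves the
 * architecture marked disable_sysfs are skipped.
 */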
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;

	cpumask_clear_cpu(cpu, &cache_dev_map);

	cpu_cache_sysfs_exit(cpu);
}

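/*
 * Hotplug callback: build the cacheinfo and its sysfs nodes when a CPU
 * comes online, and tear both down again on CPU_DEAD so a replugged
 * CPU starts from a clean slate.
 */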
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

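/*
 * Boot-time bootstrap: populate cacheinfo for every CPU already
 * online, then register the hotplug notifier to cover CPUs that come
 * and go later. Any failure (typically a missing arch implementation
 * of the weak hooks) aborts the whole initcall.
 */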
static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo..cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);