regmap-debugfs.c

/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	const char *name;
	struct list_head link;
};

static struct dentry *regmap_debugfs_root;

static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

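/* The "name" file: report the name of the driver bound to the map's device. */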
static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

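/* Free every entry in the register dump offset cache. */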
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

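/*
 * A register is included in the dump if it is not precious and is
 * either readable or present in the register cache.
 */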
static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry?  Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;      /* : \n */
	}
}

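/*
 * Format registers in the range [from, to] as a fixed-width dump and
 * copy the slice selected by *ppos and count out to userspace.
 */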
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i <= to; i += map->reg_stride) {
		if (!regmap_readable(map, i) && !regmap_cached(map, i))
			continue;

		if (regmap_precious(map, i))
			continue;

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

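/* The "registers" file: dump the whole register map. */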
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 */

static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

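/* Per-range files: dump a single named register range. */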
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

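/* The "range" file: list the contiguous blocks of printable registers. */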
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

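/*
 * The "access" file: one line per register showing whether it is
 * readable, writeable, volatile and precious.
 */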
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

static int access_open(struct inode *inode, struct file *file)
{
	return single_open(file, regmap_access_show, inode->i_private);
}

static const struct file_operations regmap_access_fops = {
	.open = access_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

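/* Toggle cache_only from debugfs, syncing the cache when it is cleared. */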
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	ssize_t result;
	bool was_enabled, require_sync = false;
	int err;

	map->lock(map->lock_arg);

	was_enabled = map->cache_only;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0) {
		map->unlock(map->lock_arg);
		return result;
	}

	if (map->cache_only && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_only && was_enabled) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}

	map->unlock(map->lock_arg);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return result;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

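/* Toggle cache_bypass from debugfs, warning and tainting when it is enabled. */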
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	ssize_t result;
	bool was_enabled;

	map->lock(map->lock_arg);

	was_enabled = map->cache_bypass;

	result = debugfs_write_file_bool(file, user_buf, count, ppos);
	if (result < 0)
		goto out;

	if (map->cache_bypass && !was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!map->cache_bypass && was_enabled) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}

out:
	map->unlock(map->lock_arg);

	return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

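/*
 * Create the debugfs directory and files for a regmap.  If the debugfs
 * root does not exist yet, queue the map for regmap_debugfs_initcall().
 */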
void regmap_debugfs_init(struct regmap *map, const char *name)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		node->name = name;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
					      devname, name);
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
	if (!map->debugfs) {
		dev_warn(map->dev, "Failed to create debugfs directory\n");
		return;
	}

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

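/*
 * Tear down a map's debugfs entries and free its dump cache, or drop it
 * from the early-init list if the debugfs root was never created.
 */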
void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

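/* Create the debugfs root and register any maps queued before it existed. */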
void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
	if (!regmap_debugfs_root) {
		pr_warn("regmap: Failed to create debugfs root\n");
		return;
	}

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map, node->name);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}