regmap-debugfs.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692
  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Register map access API - debugfs
  4. //
  5. // Copyright 2011 Wolfson Microelectronics plc
  6. //
  7. // Author: Mark Brown <[email protected]>
  8. #include <linux/slab.h>
  9. #include <linux/mutex.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/device.h>
  13. #include <linux/list.h>
  14. #include "internal.h"
/* Deferred-registration record for maps created before debugfs is ready */
struct regmap_debugfs_node {
	struct regmap *map;	/* the map awaiting debugfs init */
	struct list_head link;	/* entry on regmap_debugfs_early_list */
};

/* Counter used to generate unique "dummyN" directory names */
static unsigned int dummy_index;
/* Top-level "regmap" dir; NULL until regmap_debugfs_initcall() creates it */
static struct dentry *regmap_debugfs_root;
/* Maps queued before the root directory existed, protected by the mutex */
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
  23. /* Calculate the length of a fixed format */
  24. static size_t regmap_calc_reg_len(int max_val)
  25. {
  26. return snprintf(NULL, 0, "%x", max_val);
  27. }
/*
 * debugfs "name" read: report the driver name bound to the map, or
 * "nodev" when the map has no device or driver attached.
 */
static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret >= PAGE_SIZE) {
		/* Name was truncated.  NOTE(review): this returns the
		 * would-be length rather than an error code - looks
		 * suspicious, confirm the intent. */
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}
/* Read-only "name" file */
static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};
  55. static void regmap_debugfs_free_dump_cache(struct regmap *map)
  56. {
  57. struct regmap_debugfs_off_cache *c;
  58. while (!list_empty(&map->debugfs_off_cache)) {
  59. c = list_first_entry(&map->debugfs_off_cache,
  60. struct regmap_debugfs_off_cache,
  61. list);
  62. list_del(&c->list);
  63. kfree(c);
  64. }
  65. }
  66. static bool regmap_printable(struct regmap *map, unsigned int reg)
  67. {
  68. if (regmap_precious(map, reg))
  69. return false;
  70. if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
  71. return false;
  72. return true;
  73. }
/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 *
 * On first use this builds a cache of contiguous printable-register
 * blocks keyed by file offset.  It then translates the file offset
 * @from into the register number at which the dump should start, and
 * updates *@pos to the file offset corresponding to that register.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}
				continue;
			}

			/* No cache entry? Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					/* OOM: drop the partial cache and
					 * fall back to returning @base */
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}
		/* @from lies past this block: remember its end as fallback */
		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
  153. static inline void regmap_calc_tot_len(struct regmap *map,
  154. void *buf, size_t count)
  155. {
  156. /* Calculate the length of a fixed format */
  157. if (!map->debugfs_tot_len) {
  158. map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
  159. map->debugfs_val_len = 2 * map->format.val_bytes;
  160. map->debugfs_tot_len = map->debugfs_reg_len +
  161. map->debugfs_val_len + 3; /* : \n */
  162. }
  163. }
  164. static int regmap_next_readable_reg(struct regmap *map, int reg)
  165. {
  166. struct regmap_debugfs_off_cache *c;
  167. int ret = -EINVAL;
  168. if (regmap_printable(map, reg + map->reg_stride)) {
  169. ret = reg + map->reg_stride;
  170. } else {
  171. mutex_lock(&map->cache_lock);
  172. list_for_each_entry(c, &map->debugfs_off_cache, list) {
  173. if (reg > c->max_reg)
  174. continue;
  175. if (reg < c->base_reg) {
  176. ret = c->base_reg;
  177. break;
  178. }
  179. }
  180. mutex_unlock(&map->cache_lock);
  181. }
  182. return ret;
  183. }
/*
 * Format registers [from, to] as "reg: value" lines into a kernel
 * buffer and copy the window selected by *ppos/count to userspace.
 * Values that cannot be read are rendered as 'X' characters.
 */
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Cap the transfer at the largest kmalloc-able size */
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register (offset relative to @from) */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;
			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;
out:
	kfree(buf);
	return ret;
}
/* debugfs "registers" read: dump the full register map */
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}
/* Register writes from debugfs are compiled out by default; see below */
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 */

/*
 * Parse "<reg> <value>" (both hex) from userspace and write the value
 * to the register, tainting the kernel since hardware state has been
 * changed behind the driver's back.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	/* NOTE(review): reg is parsed with simple_strtoul but value with
	 * kstrtoul - inconsistent, though this code is normally #ifdef'd
	 * out; confirm before enabling. */
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
/* "registers" file; .write is NULL unless REGMAP_ALLOW_WRITE_DEBUGFS */
static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};
/* debugfs per-range file read: dump only the named register range */
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}
/* Read-only per-range dump files */
static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};
/*
 * debugfs "range" read: emit one "base-max\n" line for each contiguous
 * block of printable registers, reusing the offset cache built for the
 * "registers" file.
 */
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Cap the transfer at the largest kmalloc-able size */
	if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
		count = PAGE_SIZE << (MAX_ORDER - 1);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Scratch buffer to format one range line before copying it in */
	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache. We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		/* Only emit entries at or after the requested offset */
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}
/* Read-only "range" file */
static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};
  366. static int regmap_access_show(struct seq_file *s, void *ignored)
  367. {
  368. struct regmap *map = s->private;
  369. int i, reg_len;
  370. reg_len = regmap_calc_reg_len(map->max_register);
  371. for (i = 0; i <= map->max_register; i += map->reg_stride) {
  372. /* Ignore registers which are neither readable nor writable */
  373. if (!regmap_readable(map, i) && !regmap_writeable(map, i))
  374. continue;
  375. /* Format the register */
  376. seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
  377. regmap_readable(map, i) ? 'y' : 'n',
  378. regmap_writeable(map, i) ? 'y' : 'n',
  379. regmap_volatile(map, i) ? 'y' : 'n',
  380. regmap_precious(map, i) ? 'y' : 'n');
  381. }
  382. return 0;
  383. }
  384. DEFINE_SHOW_ATTRIBUTE(regmap_access);
/*
 * debugfs "cache_only" write: toggle map->cache_only under the map
 * lock.  Forcing it on taints the kernel; forcing it off triggers a
 * cache sync back to the hardware.
 */
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	/* Sync after dropping the map lock; NOTE(review): presumably
	 * regcache_sync() takes the lock itself - confirm. */
	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}
/* Read via generic bool helper, write via the checked toggle above */
static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};
/*
 * debugfs "cache_bypass" write: toggle map->cache_bypass under the map
 * lock, tainting the kernel when the bypass is forced on.
 */
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	err = debugfs_file_get(file->f_path.dentry);
	if (err)
		return err;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);
	debugfs_file_put(file->f_path.dentry);

	return count;
}
/* Read via generic bool helper, write via the checked toggle above */
static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};
/*
 * Create the debugfs directory and files for @map.  If the debugfs
 * root does not exist yet the map is queued on the early list and this
 * function is re-run from regmap_debugfs_initcall().
 */
void regmap_debugfs_init(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";
	const char *name = map->name;

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case. For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	/* Directory name is "<devname>-<name>" for named maps */
	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
						      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	/* Deviceless maps all report "dummy"; disambiguate with a counter.
	 * NOTE(review): any previous debugfs_name is freed and the field
	 * reassigned below - presumed safe, but verify against the
	 * deferred re-init path. */
	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		if (!map->debugfs_name)
			return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

		/* "registers" is writable only when writes are compiled in */
#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	/* One extra dump file per named register range */
	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}
/*
 * Tear down a map's debugfs entries and dump cache, or remove it from
 * the early-registration list if debugfs was never initialised for it.
 */
void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
		map->debugfs_name = NULL;
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}
/*
 * Called once debugfs is available: create the root "regmap" directory
 * and initialise every map that registered before it existed.
 */
void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}