regmap-mmio.c

// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - MMIO support
//
// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/swab.h>

#include "internal.h"

struct regmap_mmio_context {
	void __iomem *regs;
	unsigned int val_bytes;
	bool big_endian;

	bool attached_clk;
	struct clk *clk;

	void (*reg_write)(struct regmap_mmio_context *ctx,
			  unsigned int reg, unsigned int val);
	unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
				 unsigned int reg);
};

static int regmap_mmio_regbits_check(size_t reg_bits)
{
	switch (reg_bits) {
	case 8:
	case 16:
	case 32:
		return 0;
	default:
		return -EINVAL;
	}
}

static int regmap_mmio_get_min_stride(size_t val_bits)
{
	int min_stride;

	switch (val_bits) {
	case 8:
		/* The core treats 0 as 1 */
		min_stride = 0;
		break;
	case 16:
		min_stride = 2;
		break;
	case 32:
		min_stride = 4;
		break;
	default:
		return -EINVAL;
	}

	return min_stride;
}

static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
			       unsigned int reg,
			       unsigned int val)
{
	writeb(val, ctx->regs + reg);
}

static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
				       unsigned int reg,
				       unsigned int val)
{
	writeb_relaxed(val, ctx->regs + reg);
}

static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
				 unsigned int reg, unsigned int val)
{
	iowrite8(val, ctx->regs + reg);
}

static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writew(val, ctx->regs + reg);
}

static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
					  unsigned int reg,
					  unsigned int val)
{
	writew_relaxed(val, ctx->regs + reg);
}

static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite16(val, ctx->regs + reg);
}

static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writew(swab16(val), ctx->regs + reg);
}

static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite16be(val, ctx->regs + reg);
}

static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writel(val, ctx->regs + reg);
}

static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
					  unsigned int reg,
					  unsigned int val)
{
	writel_relaxed(val, ctx->regs + reg);
}

static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite32(val, ctx->regs + reg);
}

static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
				  unsigned int reg,
				  unsigned int val)
{
	writel(swab32(val), ctx->regs + reg);
}

static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
				    unsigned int reg, unsigned int val)
{
	iowrite32be(val, ctx->regs + reg);
}

static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
	struct regmap_mmio_context *ctx = context;
	int ret;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	ctx->reg_write(ctx, reg, val);

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return 0;
}

static int regmap_mmio_noinc_write(void *context, unsigned int reg,
				   const void *val, size_t val_count)
{
	struct regmap_mmio_context *ctx = context;
	int ret = 0;
	int i;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	/*
	 * There are no native, assembly-optimized write single register
	 * operations for big endian, so fall back to emulation if this
	 * is needed. (Single bytes are fine, they are not affected by
	 * endianness.)
	 */
	if (ctx->big_endian && (ctx->val_bytes > 1)) {
		switch (ctx->val_bytes) {
		case 2:
		{
			const u16 *valp = (const u16 *)val;

			for (i = 0; i < val_count; i++)
				writew(swab16(valp[i]), ctx->regs + reg);
			goto out_clk;
		}
		case 4:
		{
			const u32 *valp = (const u32 *)val;

			for (i = 0; i < val_count; i++)
				writel(swab32(valp[i]), ctx->regs + reg);
			goto out_clk;
		}
#ifdef CONFIG_64BIT
		case 8:
		{
			const u64 *valp = (const u64 *)val;

			for (i = 0; i < val_count; i++)
				writeq(swab64(valp[i]), ctx->regs + reg);
			goto out_clk;
		}
#endif
		default:
			ret = -EINVAL;
			goto out_clk;
		}
	}

	switch (ctx->val_bytes) {
	case 1:
		writesb(ctx->regs + reg, (const u8 *)val, val_count);
		break;
	case 2:
		writesw(ctx->regs + reg, (const u16 *)val, val_count);
		break;
	case 4:
		writesl(ctx->regs + reg, (const u32 *)val, val_count);
		break;
#ifdef CONFIG_64BIT
	case 8:
		writesq(ctx->regs + reg, (const u64 *)val, val_count);
		break;
#endif
	default:
		ret = -EINVAL;
		break;
	}

out_clk:
	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return ret;
}
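
/*
 * Illustrative sketch only, not part of the original file: a driver streams
 * data to a FIFO register through the generic regmap_noinc_write() helper,
 * which ends up in regmap_mmio_noinc_write() above for MMIO maps. The
 * register name and buffer below are hypothetical, and the register must be
 * reported as no-increment by the config's writeable_noinc_reg() callback.
 *
 *	u32 fifo_data[16];
 *	int ret;
 *
 *	// Write 16 words to the same FIFO offset; the register address is
 *	// not incremented between accesses (val_len is in bytes).
 *	ret = regmap_noinc_write(map, MY_DEV_FIFO_REG, fifo_data,
 *				 sizeof(fifo_data));
 */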
static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
				      unsigned int reg)
{
	return readb(ctx->regs + reg);
}

static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
					      unsigned int reg)
{
	return readb_relaxed(ctx->regs + reg);
}

static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
					unsigned int reg)
{
	return ioread8(ctx->regs + reg);
}

static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return readw(ctx->regs + reg);
}

static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx,
						 unsigned int reg)
{
	return readw_relaxed(ctx->regs + reg);
}

static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread16(ctx->regs + reg);
}

static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return swab16(readw(ctx->regs + reg));
}

static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread16be(ctx->regs + reg);
}

static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return readl(ctx->regs + reg);
}

static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx,
						 unsigned int reg)
{
	return readl_relaxed(ctx->regs + reg);
}

static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread32(ctx->regs + reg);
}

static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
					 unsigned int reg)
{
	return swab32(readl(ctx->regs + reg));
}

static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
					   unsigned int reg)
{
	return ioread32be(ctx->regs + reg);
}

static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
	struct regmap_mmio_context *ctx = context;
	int ret;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	*val = ctx->reg_read(ctx, reg);

	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return 0;
}

static int regmap_mmio_noinc_read(void *context, unsigned int reg,
				  void *val, size_t val_count)
{
	struct regmap_mmio_context *ctx = context;
	int ret = 0;

	if (!IS_ERR(ctx->clk)) {
		ret = clk_enable(ctx->clk);
		if (ret < 0)
			return ret;
	}

	switch (ctx->val_bytes) {
	case 1:
		readsb(ctx->regs + reg, (u8 *)val, val_count);
		break;
	case 2:
		readsw(ctx->regs + reg, (u16 *)val, val_count);
		break;
	case 4:
		readsl(ctx->regs + reg, (u32 *)val, val_count);
		break;
#ifdef CONFIG_64BIT
	case 8:
		readsq(ctx->regs + reg, (u64 *)val, val_count);
		break;
#endif
	default:
		ret = -EINVAL;
		goto out_clk;
	}
	/*
	 * There are no native, assembly-optimized read single register
	 * operations for big endian, so fall back to emulation if this
	 * is needed. (Single bytes are fine, they are not affected by
	 * endianness.)
	 */
	if (ctx->big_endian && (ctx->val_bytes > 1)) {
		switch (ctx->val_bytes) {
		case 2:
			swab16_array(val, val_count);
			break;
		case 4:
			swab32_array(val, val_count);
			break;
#ifdef CONFIG_64BIT
		case 8:
			swab64_array(val, val_count);
			break;
#endif
		default:
			ret = -EINVAL;
			break;
		}
	}

out_clk:
	if (!IS_ERR(ctx->clk))
		clk_disable(ctx->clk);

	return ret;
}

static void regmap_mmio_free_context(void *context)
{
	struct regmap_mmio_context *ctx = context;

	if (!IS_ERR(ctx->clk)) {
		clk_unprepare(ctx->clk);
		if (!ctx->attached_clk)
			clk_put(ctx->clk);
	}
	kfree(context);
}

static const struct regmap_bus regmap_mmio = {
	.fast_io = true,
	.reg_write = regmap_mmio_write,
	.reg_read = regmap_mmio_read,
	.reg_noinc_write = regmap_mmio_noinc_write,
	.reg_noinc_read = regmap_mmio_noinc_read,
	.free_context = regmap_mmio_free_context,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
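
/*
 * Note: .fast_io tells the regmap core that these callbacks do not sleep, so
 * the core protects the map with a spinlock rather than a mutex. The
 * reg_read()/reg_write() callbacks dispatch through the per-context accessor
 * pointers that regmap_mmio_gen_context() below selects from the configured
 * value width, endianness, io_port and use_relaxed_mmio settings.
 */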
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
					const char *clk_id,
					void __iomem *regs,
					const struct regmap_config *config)
{
	struct regmap_mmio_context *ctx;
	int min_stride;
	int ret;

	ret = regmap_mmio_regbits_check(config->reg_bits);
	if (ret)
		return ERR_PTR(ret);

	if (config->pad_bits)
		return ERR_PTR(-EINVAL);

	min_stride = regmap_mmio_get_min_stride(config->val_bits);
	if (min_stride < 0)
		return ERR_PTR(min_stride);

	if (config->reg_stride < min_stride)
		return ERR_PTR(-EINVAL);

	if (config->use_relaxed_mmio && config->io_port)
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->regs = regs;
	ctx->val_bytes = config->val_bits / 8;
	ctx->clk = ERR_PTR(-ENODEV);

	switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
	case REGMAP_ENDIAN_DEFAULT:
	case REGMAP_ENDIAN_LITTLE:
#ifdef __LITTLE_ENDIAN
	case REGMAP_ENDIAN_NATIVE:
#endif
		switch (config->val_bits) {
		case 8:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread8;
				ctx->reg_write = regmap_mmio_iowrite8;
			} else if (config->use_relaxed_mmio) {
				ctx->reg_read = regmap_mmio_read8_relaxed;
				ctx->reg_write = regmap_mmio_write8_relaxed;
			} else {
				ctx->reg_read = regmap_mmio_read8;
				ctx->reg_write = regmap_mmio_write8;
			}
			break;
		case 16:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread16le;
				ctx->reg_write = regmap_mmio_iowrite16le;
			} else if (config->use_relaxed_mmio) {
				ctx->reg_read = regmap_mmio_read16le_relaxed;
				ctx->reg_write = regmap_mmio_write16le_relaxed;
			} else {
				ctx->reg_read = regmap_mmio_read16le;
				ctx->reg_write = regmap_mmio_write16le;
			}
			break;
		case 32:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread32le;
				ctx->reg_write = regmap_mmio_iowrite32le;
			} else if (config->use_relaxed_mmio) {
				ctx->reg_read = regmap_mmio_read32le_relaxed;
				ctx->reg_write = regmap_mmio_write32le_relaxed;
			} else {
				ctx->reg_read = regmap_mmio_read32le;
				ctx->reg_write = regmap_mmio_write32le;
			}
			break;
		default:
			ret = -EINVAL;
			goto err_free;
		}
		break;
	case REGMAP_ENDIAN_BIG:
#ifdef __BIG_ENDIAN
	case REGMAP_ENDIAN_NATIVE:
#endif
		ctx->big_endian = true;
		switch (config->val_bits) {
		case 8:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread8;
				ctx->reg_write = regmap_mmio_iowrite8;
			} else {
				ctx->reg_read = regmap_mmio_read8;
				ctx->reg_write = regmap_mmio_write8;
			}
			break;
		case 16:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread16be;
				ctx->reg_write = regmap_mmio_iowrite16be;
			} else {
				ctx->reg_read = regmap_mmio_read16be;
				ctx->reg_write = regmap_mmio_write16be;
			}
			break;
		case 32:
			if (config->io_port) {
				ctx->reg_read = regmap_mmio_ioread32be;
				ctx->reg_write = regmap_mmio_iowrite32be;
			} else {
				ctx->reg_read = regmap_mmio_read32be;
				ctx->reg_write = regmap_mmio_write32be;
			}
			break;
		default:
			ret = -EINVAL;
			goto err_free;
		}
		break;
	default:
		ret = -EINVAL;
		goto err_free;
	}

	if (clk_id == NULL)
		return ctx;

	ctx->clk = clk_get(dev, clk_id);
	if (IS_ERR(ctx->clk)) {
		ret = PTR_ERR(ctx->clk);
		goto err_free;
	}

	ret = clk_prepare(ctx->clk);
	if (ret < 0) {
		clk_put(ctx->clk);
		goto err_free;
	}

	return ctx;

err_free:
	kfree(ctx);

	return ERR_PTR(ret);
}
struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
				      void __iomem *regs,
				      const struct regmap_config *config,
				      struct lock_class_key *lock_key,
				      const char *lock_name)
{
	struct regmap_mmio_context *ctx;

	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return __regmap_init(dev, &regmap_mmio, ctx, config,
			     lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);

struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
					   const char *clk_id,
					   void __iomem *regs,
					   const struct regmap_config *config,
					   struct lock_class_key *lock_key,
					   const char *lock_name)
{
	struct regmap_mmio_context *ctx;

	ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
				  lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);

int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
{
	struct regmap_mmio_context *ctx = map->bus_context;

	ctx->clk = clk;
	ctx->attached_clk = true;

	return clk_prepare(ctx->clk);
}
EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);

void regmap_mmio_detach_clk(struct regmap *map)
{
	struct regmap_mmio_context *ctx = map->bus_context;

	clk_unprepare(ctx->clk);

	ctx->attached_clk = false;
	ctx->clk = NULL;
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);

MODULE_LICENSE("GPL v2");
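
/*
 * Illustrative usage sketch only, not part of the original file: drivers
 * normally reach __devm_regmap_init_mmio_clk() through the
 * devm_regmap_init_mmio() / devm_regmap_init_mmio_clk() wrapper macros in
 * <linux/regmap.h>. The clock name "mclk" and my_regmap_config below are
 * hypothetical placeholders.
 *
 *	static const struct regmap_config my_regmap_config = {
 *		.reg_bits = 32,
 *		.val_bits = 32,
 *		.reg_stride = 4,	// must be >= the minimum stride for val_bits
 *	};
 *
 *	void __iomem *base;
 *	struct regmap *map;
 *
 *	base = devm_platform_ioremap_resource(pdev, 0);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 *	// The clock is enabled around each register access and released
 *	// automatically when the regmap is torn down.
 *	map = devm_regmap_init_mmio_clk(&pdev->dev, "mclk", base,
 *					&my_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */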