io.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>
#include <linux/instruction_pointer.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif
/*
 * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
 * specific kernel drivers in case of excessive/unwanted logging.
 *
 * Usage: Add a #define flag at the beginning of the driver file.
 * Ex: #define __DISABLE_TRACE_MMIO__
 *     #include <...>
 *     ...
 */
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);

void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
		    unsigned long caller_addr, unsigned long caller_addr0);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
			 unsigned long caller_addr, unsigned long caller_addr0);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
		   unsigned long caller_addr, unsigned long caller_addr0);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
			unsigned long caller_addr, unsigned long caller_addr0);

#else

static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				  unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				       unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
				 unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
				      unsigned long caller_addr, unsigned long caller_addr0) {}

#endif /* CONFIG_TRACE_MMIO_ACCESS */
/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
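
/*
 * Example (a hedged sketch, not part of the original header): __raw_*()
 * skip both the memory barriers and the little-endian conversion done by
 * readl()/writel(), so they only suit cases where ordering is handled
 * explicitly and the data is endian-neutral, e.g. bulk-copying opaque
 * words out of device memory. The helper below is hypothetical.
 *
 *	static void copy_from_dev(u32 *dst, const volatile void __iomem *src,
 *				  unsigned int words)
 *	{
 *		while (words--)
 *			*dst++ = __raw_readl(src);	// native endian, no barriers
 *	}
 */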
/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	__io_ar(val);
	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	__io_aw();
	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
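
/*
 * Example (a hedged sketch, not part of the original header): drivers
 * normally pair these accessors with an ioremap()ed register window.
 * REG_CTRL, REG_STATUS and the start bit are hypothetical names.
 *
 *	void __iomem *regs;	// from ioremap() of the device's MMIO range
 *	u32 status;
 *
 *	writel(0x1, regs + REG_CTRL);		// little-endian store with barriers
 *	status = readl(regs + REG_STATUS);	// little-endian load with barriers
 */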
/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */

#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	val = __raw_readb(addr);
	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__raw_writeb(value, addr);
	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
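
/*
 * Example (a hedged sketch, not part of the original header): _relaxed
 * accessors fit batches of register reads that only need ordering against
 * accesses to the same device, not against DMA buffers or locks; finish
 * with a regular accessor or an explicit barrier before relying on such
 * ordering. The REG_STATS_BASE offset is hypothetical.
 *
 *	for (i = 0; i < NUM_STATS; i++)
 *		stats[i] = readl_relaxed(regs + REG_STATS_BASE + i * 4);
 */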
/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */

#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
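
/*
 * Example (a hedged sketch, not part of the original header): the string
 * accessors repeatedly target a single address, which matches hardware
 * FIFOs exposed through one register. REG_RX_FIFO is hypothetical.
 *
 *	u32 buf[16];
 *
 *	readsl(regs + REG_RX_FIFO, buf, ARRAY_SIZE(buf));	// pop 16 words
 */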
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
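
/*
 * Example (a hedged sketch, not part of the original header): port
 * accessors take a port number rather than a pointer; on most non-x86
 * architectures they turn into MMIO relative to PCI_IOBASE. A classic
 * 8250-style UART at the legacy 0x3f8 base might be driven as:
 *
 *	outb('A', 0x3f8);		// write one byte to the data port
 *	lsr = inb(0x3f8 + 5);		// read the line status register
 */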
/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
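
/*
 * Example (a hedged sketch, not part of the original header): ins/outs
 * move whole buffers through one port, the classic case being ATA-style
 * 16-bit data ports. ATA_DATA_PORT is a hypothetical constant.
 *
 *	u16 sector[256];
 *
 *	insw(ATA_DATA_PORT, sector, ARRAY_SIZE(sector));	// one 512-byte sector
 */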
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
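
/*
 * Example (a hedged sketch, not part of the original header): the *be
 * variants suit devices whose registers are big-endian regardless of CPU
 * endianness. REG_ID and REG_CTRL are hypothetical offsets.
 *
 *	id = ioread32be(regs + REG_ID);		// byte-swapped on little-endian CPUs
 *	iowrite32be(0x1, regs + REG_CTRL);
 */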
#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
 * a default no-op implementation that expects the physical addresses used
 * for MMIO to already be marked as uncached and to be usable as kernel
 * virtual addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap(). Conversely, ioremap_np()
 * can provide stricter non-posted write semantics if the architecture
 * implements them.
 */
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

/*
 * Arch code can implement the following two hooks when using GENERIC_IOREMAP
 * ioremap_allowed() returns a bool,
 *	- true means continue to remap
 *	- false means skip remap and return directly
 * iounmap_allowed() returns a bool,
 *	- true means continue to vunmap
 *	- false means skip vunmap and return directly
 */
#ifndef ioremap_allowed
#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	return true;
}
#endif

#ifndef iounmap_allowed
#define iounmap_allowed iounmap_allowed
static inline bool iounmap_allowed(void *addr)
{
	return true;
}
#endif

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot);
void iounmap(volatile void __iomem *addr);

static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
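
/*
 * Example (a hedged sketch, not part of the original header): typical
 * driver use of ioremap()/iounmap() around a device's MMIO resource;
 * real drivers usually prefer devm_ioremap_resource(). The res variable
 * is assumed to describe the device's memory resource.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */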
#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation. In general you do not want to use this function in a
 * driver; use plain ioremap(), which is uncached by default, instead.
 * Similarly, architectures should not implement it unless they have a
 * very good reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap(). Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus. Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available.
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
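
/*
 * Example (a hedged sketch of the fallback the comment above recommends):
 *
 *	regs = ioremap_np(res->start, resource_size(res));
 *	if (!regs)
 *		regs = ioremap(res->start, resource_size(res));	// posted-write fallback
 *	if (!regs)
 *		return -ENOMEM;
 */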
#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
#ifndef pci_iounmap
#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
#endif
#endif
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
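
/*
 * Example (a hedged sketch, not part of the original header): copying a
 * firmware image into a device's shared-memory window. fw follows the
 * struct firmware API; dev_mem and win_size are hypothetical.
 *
 *	memcpy_toio(dev_mem, fw->data, fw->size);
 *	memset_io(dev_mem + fw->size, 0, win_size - fw->size);	// clear the tail
 */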
extern int devmem_is_allowed(unsigned long pfn);

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */