// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/core-api/dma-api-howto.rst for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <[email protected]>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>		/* get_order */
#include <linux/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif
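
/*
** pcxl_res_map is a bitmap of the pcxl DMA mapping area: one bit per
** page, so eight pages per map byte.  pcxl_res_hint remembers where
** the last allocation ended so the next search can start there.
*/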

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
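
/*
** map_uncached_pages() and its helpers below walk the kernel page
** tables (pgd -> p4d -> pud -> pmd -> pte) and install uncached
** (PAGE_KERNEL_UNC) translations for [vaddr, vaddr + size) covering
** the physical range that starts at paddr, purging the TLB entry
** for each page as they go.
*/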

static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb(SR_KERNEL, orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		p4d = p4d_offset(dir, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_alloc(NULL, pud, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
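
/*
** unmap_uncached_pages() below is the inverse walk: it clears the
** uncached ptes for [vaddr, vaddr + size) and purges the matching
** TLB entries.  The page table pages themselves are left in place.
*/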

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb(SR_KERNEL, orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}
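
/*
** Allocation is a first-fit search over the resource map.
** PCXL_SEARCH_LOOP scans it in aligned chunks of 8, 16 or 32 bits
** and claims the first chunk whose low `pages_needed' bits (the mask
** built in pcxl_alloc_range()) are all free.  PCXL_FIND_FREE_MAPPING
** runs that scan twice: from the rotating hint (pcxl_res_hint) to
** the end of the map, then again from the start, so exhaustion is
** only declared after both passes fail.
*/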

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
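
/*
** Worked example (assuming the usual 4 KB pages, PAGE_SHIFT == 12):
** a 3-page request has pages_needed = 3, so mask = 0x7 and the 8-bit
** search variant is used.  It claims the low three bits of the first
** map byte that has them free; if that byte sits at offset
** res_idx = 5, the returned address is
**	pcxl_dma_start + (5 << (12 + 3)) == pcxl_dma_start + 0x28000,
** since each map byte stands for eight pages of mapping area.
*/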

#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	/* BUG_ON((*res_ptr & m) != m); */ \
	*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int __maybe_unused proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "          total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m,"\n ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
				proc_pcxl_dma_show);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);
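
/*
** arch_dma_alloc() backs a coherent buffer with ordinary cached
** pages, flushes them from the data cache, and returns an alias in
** the pcxl area that maps the same physical pages uncached.  The
** device is handed the physical address in *dma_handle while the CPU
** uses the uncached virtual address, so neither side can see stale
** cache lines.  Only PCXL/PCXL2 CPUs (PA7100LC/PA7300LC) support
** this; on anything else the allocation fails with NULL.
*/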
void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
		return NULL;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
	/* This probably isn't needed to support EISA cards.
	** ISA cards will certainly only support 24-bit DMA addressing.
	** Not clear if we can, want, or need to support ISA.
	*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}
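
/*
** arch_dma_free() undoes the above in order: drop the uncached
** alias, release its slot in the resource map, then free the backing
** pages, recovering their cached kernel address from the physical
** dma_handle via __va().
*/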
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order = get_order(size);

	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
		     boot_cpu_data.cpu_type != pcxl);

	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);

	free_pages((unsigned long)__va(dma_handle), order);
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * fdc: The data cache line is written back to memory, if and only if
	 * it is dirty, and then invalidated from the data cache.
	 */
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
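
/*
** On the CPU side of a streaming sync the direction picks the cache
** op: DMA_TO_DEVICE and DMA_BIDIRECTIONAL ranges may still hold
** dirty CPU writes and are flushed (write back, then invalidate),
** while a DMA_FROM_DEVICE range holds only device-written data, so
** it is purged (invalidated without write-back) and the CPU rereads
** it from memory.
*/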
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long) phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_kernel_dcache_range(addr, size);
		return;
	case DMA_FROM_DEVICE:
		purge_kernel_dcache_range_asm(addr, addr + size);
		return;
	default:
		BUG();
	}
}