dma-noncoherent.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *   Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

/*
 * Make an area consistent: write back and/or invalidate the data cache
 * for the given range according to the DMA transfer direction.
 */
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1) / PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;

		/* Subsequent segments start at the beginning of their page */
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent. Identical to __dma_sync(),
 * but takes a physical address instead of a kernel virtual address.
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;

#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;

	__dma_sync((void *)start, size, dir);
#endif
}
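
/*
 * Called by the generic DMA mapping code when ownership of a buffer is
 * handed to the device (e.g. dma_map_*() or dma_sync_*_for_device()),
 * so that dirty cache lines are written back and/or invalidated before
 * the device touches the memory.
 */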
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}
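
/*
 * Called when ownership of a buffer is transferred back to the CPU
 * (e.g. dma_unmap_*() or dma_sync_*_for_cpu()), so that the CPU does
 * not read stale cache lines over data the device has just written.
 */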
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}
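
/*
 * Flush the data cache over a buffer that is about to be handed out as a
 * coherent (typically uncached) allocation, so that no dirty lines can
 * later be evicted on top of data written by the device.
 */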
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	flush_dcache_range(kaddr, kaddr + size);
}