// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * @File	ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author Liu Chun
 * @Date Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
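
/*
 * Sizing sketch (illustrative, assuming a 4 KB CT_PAGE_SIZE and 64-bit
 * pointers): one page-table page holds 4096 / 8 = 512 PTEs, so it maps
 * 512 * 4096 bytes = 2 MB of device address space; CT_PTP_NUM such
 * pages bound the total mappable space computed in ct_vm_create().
 */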

/*
 * Find or create a vm block based on the requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Insufficient device virtual memory space available\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from the unused list to the used list */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	/* Split the free block: the head part becomes the allocation */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

out:
	mutex_unlock(&vm->lock);
	return block;
}
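
/*
 * Worked example (illustrative addresses): with a single free block
 * [0, 64 KB) on the unused list, a 16 KB request splits it into a used
 * block [0, 16 KB) and a smaller free block [16 KB, 64 KB); a later
 * 48 KB request then takes the remainder through the exact-fit path
 * above, moving the node to the used list without a new allocation.
 */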

static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	/* Keep the unused list sorted by address */
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			/* Merge with the following free block */
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	/* Walk backwards, coalescing adjacent free predecessors */
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		/* merge only if the predecessor is exactly adjacent */
		if ((pre_ent->addr + pre_ent->size) != entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}
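
/*
 * Worked example (illustrative addresses): freeing [16 KB, 32 KB) while
 * [0, 16 KB) and [32 KB, 64 KB) already sit on the unused list first
 * merges forward into [16 KB, 64 KB), and the backward pass then folds
 * in the adjacent predecessor, leaving one free block [0, 64 KB).
 */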

/* Map a host address (kmalloced/vmalloced) to a device logical address. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block big enough to allocate\n");
		return NULL;
	}

	/* Fill one PTE per page with the DMA address of the host page */
	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;

		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	block->size = size;
	return block;
}
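
/*
 * Example of the resulting layout (hypothetical values): a block at
 * device address 0x8000 spanning two pages has pte_start = 8, so
 * ptp[8] and ptp[9] receive the DMA addresses of the substream's first
 * two pages; device page n then translates through ptp[pte_start + n].
 */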

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* Unmapping is just returning the block to the free pool */
	put_vm_block(vm, block);
}

/*
 * Return the host physical address of the @index-th device page table
 * page on success, or ~0UL on failure.
 * The first ~0UL returned marks the end of the page-table list.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}
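
/*
 * Caller-side sketch (hypothetical; program_ptp_register() is an
 * invented helper, not part of this driver): hardware setup code can
 * walk the page-table pages until the ~0UL terminator:
 *
 *	int i;
 *	dma_addr_t phys;
 *
 *	for (i = 0; ; i++) {
 *		phys = vm->get_ptp_phys(vm, i);
 *		if (phys == (dma_addr_t)~0UL)
 *			break;
 *		program_ptp_register(hw, i, phys);
 *	}
 */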

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  &pci->dev,
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* no page table pages are allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);

	/* Seed the free list with one block covering the whole space */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}
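
/*
 * Lifecycle sketch (illustrative; in the driver the owning ct_atc
 * object drives these calls): create the vm at probe time, map and
 * unmap PCM buffers through the installed callbacks, then destroy it
 * at remove time:
 *
 *	struct ct_vm *vm;
 *	struct ct_vm_block *blk;
 *
 *	if (ct_vm_create(&vm, pci) < 0)
 *		return -ENOMEM;
 *	blk = vm->map(vm, substream, size);
 *	...
 *	vm->unmap(vm, blk);
 *	ct_vm_destroy(vm);
 */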

/*
 * The caller must ensure that no mapped pages are being used by the
 * hardware before calling this function.
 */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free the allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}