qcom_glink_memshare.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
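
/**
 * struct qcom_glink_mem_entry - shared memory region registered for GLINK use
 * @dev:  device that owns the DMA mapping of the region
 * @va:   kernel virtual address of the region
 * @dma:  DMA address of the region
 * @len:  size of the region in bytes
 * @da:   device-side address of the start of the region
 * @node: entry in the global qcom_glink_mem_entries list
 */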
struct qcom_glink_mem_entry {
	struct device *dev;
	void *va;
	dma_addr_t dma;
	size_t len;
	u32 da;
	struct list_head node;
};

static DEFINE_SPINLOCK(qcom_glink_mem_entry_lock);
static LIST_HEAD(qcom_glink_mem_entries);
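
/**
 * qcom_glink_mem_entry_init() - register a shared memory region
 * @dev: device that owns the DMA mapping of the region
 * @va:  kernel virtual address of the region
 * @dma: DMA address of the region
 * @len: size of the region in bytes
 * @da:  device-side address of the start of the region
 *
 * Allocates a tracking entry for the region and adds it to the global list
 * consulted by qcom_glink_prepare_da_for_cpu().
 *
 * Return: the new entry on success, NULL on allocation failure.
 */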
struct qcom_glink_mem_entry *
qcom_glink_mem_entry_init(struct device *dev, void *va, dma_addr_t dma,
			  size_t len, u32 da)
{
	struct qcom_glink_mem_entry *mem;
	unsigned long flags;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->dev = dev;
	mem->va = va;
	mem->dma = dma;
	mem->da = da;
	mem->len = len;
	INIT_LIST_HEAD(&mem->node);

	spin_lock_irqsave(&qcom_glink_mem_entry_lock, flags);
	list_add_tail(&mem->node, &qcom_glink_mem_entries);
	spin_unlock_irqrestore(&qcom_glink_mem_entry_lock, flags);

	return mem;
}
EXPORT_SYMBOL(qcom_glink_mem_entry_init);
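
/**
 * qcom_glink_mem_entry_free() - unregister and free a shared memory region
 * @mem: entry previously returned by qcom_glink_mem_entry_init()
 *
 * Removes the entry from the global list, if present, and frees it.
 */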
void qcom_glink_mem_entry_free(struct qcom_glink_mem_entry *mem)
{
	struct qcom_glink_mem_entry *entry, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&qcom_glink_mem_entry_lock, flags);
	list_for_each_entry_safe(entry, tmp, &qcom_glink_mem_entries, node) {
		if (entry == mem) {
			list_del(&mem->node);
			break;
		}
	}
	spin_unlock_irqrestore(&qcom_glink_mem_entry_lock, flags);

	kfree(mem);
}
EXPORT_SYMBOL(qcom_glink_mem_entry_free);
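
/**
 * qcom_glink_prepare_da_for_cpu() - translate a device address for CPU access
 * @da:  device-side address referenced by the remote processor
 * @len: number of bytes the caller intends to access
 *
 * Looks up the registered memory entry covering @da, syncs the range for CPU
 * access and returns the corresponding kernel virtual address.
 *
 * Return: kernel virtual address for @da, or NULL if no registered entry
 * covers the requested range.
 */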
void *qcom_glink_prepare_da_for_cpu(u64 da, size_t len)
{
	struct qcom_glink_mem_entry *mem;
	unsigned long flags;
	void *ptr = NULL;

	spin_lock_irqsave(&qcom_glink_mem_entry_lock, flags);
	list_for_each_entry(mem, &qcom_glink_mem_entries, node) {
		int offset = da - mem->da;

		if (!mem->va)
			continue;

		if (offset < 0)
			continue;

		if (offset + len > mem->len)
			continue;

		ptr = mem->va + offset;
		dma_sync_single_for_cpu(mem->dev, da, len, DMA_FROM_DEVICE);
		break;
	}
	spin_unlock_irqrestore(&qcom_glink_mem_entry_lock, flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_glink_prepare_da_for_cpu);
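
/*
 * Example usage (illustrative sketch only, not part of this file): a GLINK
 * transport or remoteproc driver could register a shared region at probe
 * time and translate remote device addresses when handling messages. The
 * device pointer, region size and the way the device address is derived
 * below are hypothetical placeholders.
 *
 *	struct qcom_glink_mem_entry *entry;
 *	dma_addr_t dma;
 *	void *va;
 *
 *	va = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *
 *	entry = qcom_glink_mem_entry_init(dev, va, dma, SZ_64K,
 *					  lower_32_bits(dma));
 *	if (!entry)
 *		return -ENOMEM;
 *
 *	Later, when the remote processor references a device address "da":
 *
 *	void *msg = qcom_glink_prepare_da_for_cpu(da, msg_len);
 *
 *	On teardown:
 *
 *	qcom_glink_mem_entry_free(entry);
 *	dma_free_coherent(dev, SZ_64K, va, dma);
 */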