etnaviv_iommu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state_hi.xml.h"

#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

#define GPU_MEM_START	0x80000000
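
/*
 * MMUv1 uses a single flat page table: PT_ENTRIES 32-bit entries mapping
 * one 4K page each, covering a 2GB GPU virtual range that starts at
 * GPU_MEM_START.
 */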

struct etnaviv_iommuv1_context {
	struct etnaviv_iommu_context base;
	u32 *pgtable_cpu;
	dma_addr_t pgtable_dma;
};
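
/* Resolve the containing MMUv1 context from the embedded base context. */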
static struct etnaviv_iommuv1_context *
to_v1_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv1_context, base);
}

static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	drm_mm_takedown(&context->mm);

	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
		    v1_context->pgtable_dma);

	context->global->v1.shared_context = NULL;

	kfree(v1_context);
}
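
/*
 * Install a single 4K translation: the flat table is indexed by
 * (iova - GPU_MEM_START) / SZ_4K and each entry holds the physical
 * address of the backing page. The @prot argument is unused here.
 */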
static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = paddr;

	return 0;
}
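
/*
 * Unmapping does not clear the entry; it points it back at the global
 * bad page, so a stale GPU access hits a known page instead of an
 * unmapped address.
 */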
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

	return SZ_4K;
}
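
/* For state dumps, the whole 2MB page table is copied out verbatim. */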
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
	return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
				 void *buf)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}
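
/*
 * Program the MMU state into the hardware: drop the reference on the
 * previous context and take one on the new context, set the memory base
 * address seen by each engine (FE, TX, PE, PEZ, RA), then point every
 * engine's MMU at the page table.
 */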
static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	u32 pgtable;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

	/* set page table address in MC */
	pgtable = (u32)v1_context->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
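
/* MMUv1 implementation of the generic etnaviv IOMMU interface. */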
const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
	.restore = etnaviv_iommuv1_restore,
};
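
/*
 * Get the single shared MMUv1 context, allocating it on first use. All
 * page table entries initially point at the bad page.
 */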
struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv1_context *v1_context;
	struct etnaviv_iommu_context *context;

	mutex_lock(&global->lock);

	/*
	 * MMUv1 does not support switching between different contexts without
	 * a "stop the world" operation, so we only support a single shared
	 * context with this version.
	 */
	if (global->v1.shared_context) {
		context = global->v1.shared_context;
		etnaviv_iommu_context_get(context);
		mutex_unlock(&global->lock);
		return context;
	}

	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
	if (!v1_context) {
		mutex_unlock(&global->lock);
		return NULL;
	}

	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
					       &v1_context->pgtable_dma,
					       GFP_KERNEL);
	if (!v1_context->pgtable_cpu)
		goto out_free;

	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

	context = &v1_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
	context->global->v1.shared_context = context;

	mutex_unlock(&global->lock);

	return context;

out_free:
	mutex_unlock(&global->lock);
	kfree(v1_context);
	return NULL;
}