// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>
#endif

#include "cam_compat.h"
#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"
#include "cam_trace.h"
#include "cam_common_util.h"

#define CAM_MEM_SHARED_BUFFER_PAD_4K (4 * 1024)

static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
static void cam_mem_mgr_put_dma_heaps(void);
static int cam_mem_mgr_get_dma_heaps(void);
#endif
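
/*
 * Dump all active entries in the buffer table, along with the wall-clock
 * time at which each buffer was allocated, to aid buffer-leak debugging.
 */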
static void cam_mem_mgr_print_tbl(void)
{
	int i;
	uint64_t ms, tmp, hrs, min, sec;
	struct timespec64 *ts = NULL;
	struct timespec64 current_ts;

	ktime_get_real_ts64(&(current_ts));
	tmp = current_ts.tv_sec;
	ms = (current_ts.tv_nsec) / 1000000;
	sec = do_div(tmp, 60);
	min = do_div(tmp, 60);
	hrs = do_div(tmp, 24);

	CAM_INFO(CAM_MEM, "***%llu:%llu:%llu:%llu Mem mgr table dump***",
		hrs, min, sec, ms);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (tbl.bufq[i].active) {
			ts = &tbl.bufq[i].timestamp;
			tmp = ts->tv_sec;
			ms = (ts->tv_nsec) / 1000000;
			sec = do_div(tmp, 60);
			min = do_div(tmp, 60);
			hrs = do_div(tmp, 24);
			CAM_INFO(CAM_MEM,
				"%llu:%llu:%llu:%llu idx %d fd %d size %llu",
				hrs, min, sec, ms, i, tbl.bufq[i].fd,
				tbl.bufq[i].len);
		}
	}
}
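
/* Translate CAM_MEM_FLAG_* access flags into a DMA data direction. */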
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;

	return rc;
}
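
/*
 * Map a dma-buf into the kernel virtual address space. The
 * begin_cpu_access/vmap calls here must stay paired with the
 * vunmap/end_cpu_access calls in cam_mem_util_unmap_cpu_va().
 */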
static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
	uintptr_t *vaddr,
	size_t *len)
{
	int rc = 0;
	void *addr;

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * must be called as a pair to avoid stability issues.
	 */
	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		return rc;
	}

	addr = dma_buf_vmap(dmabuf);
	if (!addr) {
		CAM_ERR(CAM_MEM, "kernel map fail");
		*vaddr = 0;
		*len = 0;
		rc = -ENOSPC;
		goto fail;
	}

	*vaddr = (uint64_t)addr;
	*len = dmabuf->size;

	return 0;

fail:
	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	return rc;
}
static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
	uint64_t vaddr)
{
	int rc = 0;

	if (!dmabuf || !vaddr) {
		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
		return -EINVAL;
	}

	dma_buf_vunmap(dmabuf, (void *)vaddr);

	/*
	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
	 * must be called as a pair to avoid stability issues.
	 */
	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
			dmabuf);
		return rc;
	}

	return rc;
}
static int cam_mem_mgr_create_debug_fs(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	dbgfileptr = debugfs_create_dir("camera_memmgr", NULL);
	if (!dbgfileptr) {
		CAM_ERR(CAM_MEM, "DebugFS could not create directory!");
		rc = -ENOENT;
		goto end;
	}

	/* Store parent inode for cleanup in caller */
	tbl.dentry = dbgfileptr;

	dbgfileptr = debugfs_create_bool("alloc_profile_enable", 0644,
		tbl.dentry, &tbl.alloc_profile_enable);
	if (IS_ERR(dbgfileptr)) {
		if (PTR_ERR(dbgfileptr) == -ENODEV)
			CAM_WARN(CAM_MEM, "DebugFS not enabled in kernel!");
		else
			rc = PTR_ERR(dbgfileptr);
	}
end:
	return rc;
}
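
/*
 * One-time setup of the memory manager: query the SMMU driver for the
 * cache and padding policy, grab the DMA heaps (when available),
 * allocate the slot bitmap and mark the table initialized.
 */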
int cam_mem_mgr_init(void)
{
	int i;
	int bitmap_size;
	int rc = 0;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	if (cam_smmu_need_force_alloc_cached(&tbl.force_cache_allocs)) {
		CAM_ERR(CAM_MEM, "Error in getting force cache alloc flag");
		return -EINVAL;
	}

	tbl.need_shared_buffer_padding = cam_smmu_need_shared_buffer_padding();

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	rc = cam_mem_mgr_get_dma_heaps();
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed in getting dma heaps rc=%d", rc);
		return rc;
	}
#endif
	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap) {
		rc = -ENOMEM;
		goto put_heaps;
	}

	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}

	mutex_init(&tbl.m_lock);

	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_INITIALIZED);

	cam_mem_mgr_create_debug_fs();

	return 0;
put_heaps:
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	cam_mem_mgr_put_dma_heaps();
#endif
	return rc;
}
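
/*
 * Slot allocation is a bitmap scan under the table lock; bit 0 stays
 * permanently set so that a handle index of 0 can be treated as invalid.
 */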
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	ktime_get_real_ts64(&(tbl.bufq[idx].timestamp));
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}

static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].is_internal = false;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}
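
/*
 * Look up the IOVA and mapped length of a buffer for a given SMMU handle.
 * Secure handles are resolved through the stage-2 mapping path.
 */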
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	dma_addr_t *iova_ptr, size_t *len_ptr)
{
	int rc = 0, idx;

	*len_ptr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -ENOENT;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		return -EAGAIN;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d",
			buf_handle, mmu_handle, tbl.bufq[idx].fd);
		goto handle_mismatch;
	}

	CAM_DBG(CAM_MEM,
		"handle:0x%x fd:%d iova_ptr:%pK len_ptr:%zu",
		mmu_handle, tbl.bufq[idx].fd, iova_ptr, *len_ptr);
handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);
int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
	int idx;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		return -EPERM;
	}

	if (buf_handle != tbl.bufq[idx].buf_handle)
		return -EINVAL;

	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
		return -EINVAL;

	if (tbl.bufq[idx].kmdvaddr) {
		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
		*len = tbl.bufq[idx].len;
	} else {
		CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
			buf_handle);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);
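
/*
 * Perform a userspace-requested cache operation on a buffer: clean maps
 * to DMA_TO_DEVICE, invalidate to DMA_FROM_DEVICE, clean+invalidate to
 * DMA_BIDIRECTIONAL. The actual maintenance is done by the paired
 * dma_buf_{begin,end}_cpu_access() calls at the bottom.
 */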
int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t cache_dir;
	unsigned long dmabuf_flag = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		rc = -EINVAL;
		goto end;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto end;
	}

	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
	if (rc) {
		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
		goto end;
	}

#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)
	CAM_DBG(CAM_MEM, "Calling dma-buf APIs for cache operations");
	cache_dir = DMA_BIDIRECTIONAL;
#else
	if (dmabuf_flag & ION_FLAG_CACHED) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			cache_dir = DMA_TO_DEVICE;
			break;
		case CAM_MEM_INV_CACHE:
			cache_dir = DMA_FROM_DEVICE;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			cache_dir = DMA_BIDIRECTIONAL;
			break;
		default:
			CAM_ERR(CAM_MEM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto end;
		}
	} else {
		CAM_DBG(CAM_MEM, "BUF is not cached");
		goto end;
	}
#endif
	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
		goto end;
	}

	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
		cache_dir);
	if (rc) {
		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
		goto end;
	}

end:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
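
/*
 * Two allocator backends follow: a DMA-BUF heap path used when
 * CONFIG_DMABUF_HEAPS is reachable, and a legacy ION path otherwise.
 */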
#if IS_REACHABLE(CONFIG_DMABUF_HEAPS)

#define CAM_MAX_VMIDS 4

static void cam_mem_mgr_put_dma_heaps(void)
{
	CAM_DBG(CAM_MEM, "Releasing DMA Buf heaps usage");
}

static int cam_mem_mgr_get_dma_heaps(void)
{
	int rc = 0;

	tbl.system_heap = NULL;
	tbl.system_uncached_heap = NULL;
	tbl.camera_heap = NULL;
	tbl.camera_uncached_heap = NULL;
	tbl.secure_display_heap = NULL;

	tbl.system_heap = dma_heap_find("qcom,system");
	if (IS_ERR_OR_NULL(tbl.system_heap)) {
		/*
		 * dma_heap_find() returns NULL (not an ERR_PTR) when the
		 * heap is absent; fall back to -ENODEV so we never report
		 * success here.
		 */
		rc = PTR_ERR(tbl.system_heap) ?: -ENODEV;
		CAM_ERR(CAM_MEM, "qcom system heap not found, rc=%d", rc);
		tbl.system_heap = NULL;
		goto put_heaps;
	}

	tbl.system_uncached_heap = dma_heap_find("qcom,system-uncached");
	if (IS_ERR_OR_NULL(tbl.system_uncached_heap)) {
		if (tbl.force_cache_allocs) {
			/* optional, we anyway do not use uncached */
			CAM_DBG(CAM_MEM,
				"qcom system-uncached heap not found, err=%ld",
				PTR_ERR(tbl.system_uncached_heap));
			tbl.system_uncached_heap = NULL;
		} else {
			/* fatal, must need uncached heaps */
			rc = PTR_ERR(tbl.system_uncached_heap) ?: -ENODEV;
			CAM_ERR(CAM_MEM,
				"qcom system-uncached heap not found, rc=%d",
				rc);
			tbl.system_uncached_heap = NULL;
			goto put_heaps;
		}
	}

	tbl.secure_display_heap = dma_heap_find("qcom,display");
	if (IS_ERR_OR_NULL(tbl.secure_display_heap)) {
		rc = PTR_ERR(tbl.secure_display_heap) ?: -ENODEV;
		CAM_ERR(CAM_MEM, "qcom,display heap not found, rc=%d",
			rc);
		tbl.secure_display_heap = NULL;
		goto put_heaps;
	}

	tbl.camera_heap = dma_heap_find("qcom,camera");
	if (IS_ERR_OR_NULL(tbl.camera_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera heap not found, err=%ld",
			PTR_ERR(tbl.camera_heap));
		tbl.camera_heap = NULL;
	}

	tbl.camera_uncached_heap = dma_heap_find("qcom,camera-uncached");
	if (IS_ERR_OR_NULL(tbl.camera_uncached_heap)) {
		/* optional heap, not a fatal error */
		CAM_DBG(CAM_MEM, "qcom camera-uncached heap not found, err=%ld",
			PTR_ERR(tbl.camera_uncached_heap));
		tbl.camera_uncached_heap = NULL;
	}

	CAM_INFO(CAM_MEM,
		"Heaps : system=%pK, system_uncached=%pK, camera=%pK, camera-uncached=%pK, secure_display=%pK",
		tbl.system_heap, tbl.system_uncached_heap,
		tbl.camera_heap, tbl.camera_uncached_heap,
		tbl.secure_display_heap);

	return 0;
put_heaps:
	cam_mem_mgr_put_dma_heaps();
	return rc;
}
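
/*
 * Allocate a dma-buf from the most specific heap available: secure
 * allocations come from the secure display heap and are lent to the
 * camera (and optionally CDSP) VMIDs; cached allocations try the camera
 * heap before falling back to the system heap.
 */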
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf)
{
	int rc = 0;
	struct dma_heap *heap;
	struct dma_heap *try_heap = NULL;
	struct timespec64 ts1, ts2;
	long microsec = 0;
	bool use_cached_heap = false;
	struct mem_buf_lend_kernel_arg arg;
	/* zero-init so the failure log below never prints uninitialized stack */
	int vmids[CAM_MAX_VMIDS] = {0};
	int perms[CAM_MAX_VMIDS] = {0};
	int num_vmids = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_CACHE) ||
		(tbl.force_cache_allocs &&
		(!(cam_flags & CAM_MEM_FLAG_PROTECTED_MODE)))) {
		CAM_DBG(CAM_MEM,
			"Using CACHED heap, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		use_cached_heap = true;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		use_cached_heap = true;
		CAM_DBG(CAM_MEM,
			"Using CACHED heap for secure, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
	} else {
		use_cached_heap = false;
		CAM_ERR(CAM_MEM,
			"Using UNCACHED heap not supported, cam_flags=0x%x, force_cache_allocs=%d",
			cam_flags, tbl.force_cache_allocs);
		/*
		 * Need better handling based on whether dma-buf-heaps
		 * support uncached heaps or not. For now, assume not
		 * supported.
		 */
		return -EINVAL;
	}

	if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap = tbl.secure_display_heap;

		vmids[num_vmids] = VMID_CP_CAMERA;
		perms[num_vmids] = PERM_READ | PERM_WRITE;
		num_vmids++;

		if (cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT) {
			CAM_DBG(CAM_MEM, "Secure mode CDSP flags");

			vmids[num_vmids] = VMID_CP_CDSP;
			perms[num_vmids] = PERM_READ | PERM_WRITE;
			num_vmids++;
		}
	} else if (use_cached_heap) {
		try_heap = tbl.camera_heap;
		heap = tbl.system_heap;
	} else {
		try_heap = tbl.camera_uncached_heap;
		heap = tbl.system_uncached_heap;
	}

	CAM_DBG(CAM_MEM, "Using heaps : try=%pK, heap=%pK", try_heap, heap);

	*buf = NULL;

	if (!try_heap && !heap) {
		CAM_ERR(CAM_MEM,
			"No heap available for allocation, can't allocate");
		return -EINVAL;
	}

	if (try_heap) {
		*buf = dma_heap_buffer_alloc(try_heap, len, O_RDWR, 0);
		if (IS_ERR(*buf)) {
			CAM_WARN(CAM_MEM,
				"Failed in allocating from try heap, heap=%pK, len=%zu, err=%ld",
				try_heap, len, PTR_ERR(*buf));
			*buf = NULL;
		}
	}

	if (*buf == NULL) {
		*buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0);
		if (IS_ERR(*buf)) {
			rc = PTR_ERR(*buf);
			CAM_ERR(CAM_MEM,
				"Failed in allocating from heap, heap=%pK, len=%zu, err=%d",
				heap, len, rc);
			*buf = NULL;
			return rc;
		}
	}

	if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		if (num_vmids >= CAM_MAX_VMIDS) {
			CAM_ERR(CAM_MEM, "Insufficient array size for vmids %d",
				num_vmids);
			rc = -EINVAL;
			goto end;
		}

		arg.nr_acl_entries = num_vmids;
		arg.vmids = vmids;
		arg.perms = perms;

		rc = mem_buf_lend(*buf, &arg);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in buf lend rc=%d, buf=%pK, vmids [0]=0x%x, [1]=0x%x, [2]=0x%x",
				rc, *buf, vmids[0], vmids[1], vmids[2]);
			goto end;
		}
	}

	CAM_DBG(CAM_MEM, "Allocate success, len=%zu, *buf=%pK", len, *buf);

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
end:
	dma_buf_put(*buf);
	return rc;
}
#else
static int cam_mem_util_get_dma_buf(size_t len,
	unsigned int cam_flags,
	struct dma_buf **buf)
{
	int rc = 0;
	unsigned int heap_id;
	int32_t ion_flag = 0;
	struct timespec64 ts1, ts2;
	long microsec = 0;

	if (!buf) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (tbl.alloc_profile_enable)
		CAM_GET_TIMESTAMP(ts1);

	if ((cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cam_flags & CAM_MEM_FLAG_CDSP_OUTPUT)) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |=
			ION_FLAG_SECURE | ION_FLAG_CP_CAMERA | ION_FLAG_CP_CDSP;
	} else if (cam_flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
			ION_HEAP(ION_CAMERA_HEAP_ID);
	}

	if (cam_flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	if (tbl.force_cache_allocs && (!(ion_flag & ION_FLAG_SECURE)))
		ion_flag |= ION_FLAG_CACHED;

	*buf = ion_alloc(len, heap_id, ion_flag);
	if (IS_ERR_OR_NULL(*buf))
		return -ENOMEM;

	if (tbl.alloc_profile_enable) {
		CAM_GET_TIMESTAMP(ts2);
		CAM_GET_TIMESTAMP_DIFF_IN_MICRO(ts1, ts2, microsec);
		trace_cam_log_event("IONAllocProfile", "size and time in micro",
			len, microsec);
	}

	return rc;
}
#endif
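
/*
 * Allocate a dma-buf and install an fd for it. Note the deliberate extra
 * dma_buf_get() on the new fd; see the comment in the body for how the
 * resulting refcount of 2 is eventually dropped.
 */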
static int cam_mem_util_buffer_alloc(size_t len, uint32_t flags,
	struct dma_buf **dmabuf,
	int *fd)
{
	int rc;
	struct dma_buf *temp_dmabuf = NULL;

	rc = cam_mem_util_get_dma_buf(len, flags, dmabuf);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Error allocating dma buf : len=%zu, flags=0x%x",
			len, flags);
		return rc;
	}

	*fd = dma_buf_fd(*dmabuf, O_CLOEXEC);
	if (*fd < 0) {
		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
		rc = -EINVAL;
		goto put_buf;
	}

	CAM_DBG(CAM_MEM, "Alloc success : len=%zu, *dmabuf=%pK, fd=%d",
		len, *dmabuf, *fd);

	/*
	 * Increment the ref count so that it becomes 2 here. When we
	 * close the fd, the refcount drops to 1, and when we then call
	 * dma_buf_put(), it drops to 0 and the memory is freed.
	 */
	temp_dmabuf = dma_buf_get(*fd);
	if (IS_ERR_OR_NULL(temp_dmabuf)) {
		CAM_ERR(CAM_MEM, "dma_buf_get failed, *fd=%d", *fd);
		rc = -EINVAL;
		goto put_buf;
	}

	return rc;

put_buf:
	dma_buf_put(*dmabuf);
	return rc;
}
static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if ((cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)) {
		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	return 0;
}

static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_MEM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if ((cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) &&
		(cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)) {
		CAM_ERR(CAM_MEM,
			"Kernel mapping in secure mode not allowed, flags=0x%x",
			cmd->flags);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_MEM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}
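
/*
 * Map a buffer into every requested SMMU context. On a partial failure,
 * all mappings made so far are rolled back before returning.
 */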
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region,
	bool is_internal)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);
	bool dis_delayed_unmap = false;

	if (dir < 0) {
		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
		return dir;
	}

	if (flags & CAM_MEM_FLAG_DISABLE_DELAYED_UNMAP)
		dis_delayed_unmap = true;

	CAM_DBG(CAM_MEM,
		"map_hw_va : fd = %d, flags = 0x%x, dir=%d, num_hdls=%d",
		fd, flags, dir, num_hdls);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
				fd,
				dir,
				hw_vaddr,
				len);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to securely map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], rc);
				goto multi_map_fail;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_user_iova(mmu_hdls[i],
				fd,
				dis_delayed_unmap,
				dir,
				hw_vaddr,
				len,
				region,
				is_internal);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed to map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, dir, mmu_hdls[i], region, rc);
				goto multi_map_fail;
			}
		}
	}

	return rc;
multi_map_fail:
	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		for (--i; i >= 0; i--)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
	else
		for (--i; i >= 0; i--)
			cam_smmu_unmap_user_iova(mmu_hdls[i],
				fd,
				CAM_SMMU_REGION_IO);
	return rc;
}
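
/*
 * Handler for the userspace alloc-and-map ioctl: allocate a buffer,
 * optionally map it into the SMMU contexts and/or the kernel, and
 * publish the result in a free table slot.
 */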
int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct dma_buf *dmabuf = NULL;
	int fd = -1;
	dma_addr_t hw_vaddr = 0;
	size_t len;
	uintptr_t kvaddr = 0;
	size_t klen;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	len = cmd->len;

	if (tbl.need_shared_buffer_padding &&
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)) {
		len += CAM_MEM_SHARED_BUFFER_PAD_4K;
		CAM_DBG(CAM_MEM, "Pad 4k size, actual %llu, allocating %zu",
			cmd->len, len);
	}

	rc = cam_mem_util_check_alloc_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
			cmd->flags, rc);
		return rc;
	}

	rc = cam_mem_util_buffer_alloc(len, cmd->flags, &dmabuf, &fd);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc failed, len=%zu, align=%llu, flags=0x%x, num_hdl=%d",
			len, cmd->align, cmd->flags, cmd->num_hdl);
		cam_mem_mgr_print_tbl();
		return rc;
	}
	if (!dmabuf) {
		CAM_ERR(CAM_MEM,
			"Ion Alloc returned NULL dmabuf! fd=%d, len=%zu", fd, len);
		cam_mem_mgr_print_tbl();
		/* don't report success with a NULL dmabuf */
		return -EINVAL;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		enum cam_smmu_region_id region;

		/*
		 * Later checks take precedence: PROTECTED_MODE wins over
		 * SHARED, which wins over READ_WRITE.
		 */
		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
			region = CAM_SMMU_REGION_SHARED;

		if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
			region = CAM_SMMU_REGION_IO;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			fd,
			&hw_vaddr,
			&len,
			region,
			true);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va len=%zu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
				len, cmd->flags,
				fd, region, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len)
					rc = -EBADR;
				cam_mem_mgr_print_tbl();
			}
			goto map_hw_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
	tbl.bufq[idx].is_internal = true;
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(dmabuf, &kvaddr, &klen);
		if (rc) {
			CAM_ERR(CAM_MEM, "dmabuf: %pK mapping failed: %d",
				dmabuf, rc);
			goto map_kernel_fail;
		}
	}

	if (cmd->flags & CAM_MEM_FLAG_KMD_DEBUG_FLAG)
		tbl.dbg_buf_idx = idx;

	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = hw_vaddr;
	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_kernel_fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}
static bool cam_mem_util_is_map_internal(int32_t fd)
{
	uint32_t i;
	bool is_internal = false;

	mutex_lock(&tbl.m_lock);
	for_each_set_bit(i, tbl.bitmap, tbl.bits) {
		if (tbl.bufq[i].fd == fd) {
			is_internal = tbl.bufq[i].is_internal;
			break;
		}
	}
	mutex_unlock(&tbl.m_lock);

	return is_internal;
}
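
/*
 * Handler for the userspace map ioctl: import an externally allocated
 * dma-buf by fd and map it into the requested SMMU contexts.
 */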
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct dma_buf *dmabuf;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;
	bool is_internal = false;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X", cmd->flags);
		return rc;
	}

	dmabuf = dma_buf_get(cmd->fd);
	if (IS_ERR_OR_NULL(dmabuf)) {
		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
		return -EINVAL;
	}

	is_internal = cam_mem_util_is_map_internal(cmd->fd);

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d, fd=%d",
			idx, cmd->fd);
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO,
			is_internal);
		if (rc) {
			CAM_ERR(CAM_MEM,
				"Failed in map_hw_va, flags=0x%x, fd=%d, len=%zu, region=%d, num_hdl=%d, rc=%d",
				cmd->flags, cmd->fd, len,
				CAM_SMMU_REGION_IO, cmd->num_hdl, rc);
			if (rc == -EALREADY) {
				if ((size_t)dmabuf->size != len) {
					rc = -EBADR;
					cam_mem_mgr_print_tbl();
				}
			}
			goto map_fail;
		}
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].dma_buf = dmabuf;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	tbl.bufq[idx].is_internal = is_internal;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;
	cmd->out.size = (uint32_t)len;

	CAM_DBG(CAM_MEM,
		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
		tbl.bufq[idx].len);

	return rc;

map_fail:
	cam_mem_put_slot(idx);
slot_fail:
	dma_buf_put(dmabuf);
	return rc;
}
static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	int rc = 0;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

	CAM_DBG(CAM_MEM,
		"unmap_hw_va : idx=%d, fd=%d, flags=0x%x, num_hdls=%d, client=%d",
		idx, fd, flags, num_hdls, client);

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d",
					i, fd, mmu_hdls[i], rc);
				goto unmap_end;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_MEM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0) {
				CAM_ERR(CAM_MEM,
					"Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d",
					i, fd, mmu_hdls[i], region, rc);
				goto unmap_end;
			}
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_MEM, "unmapping failed");
	return rc;
}
static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
		cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
			tbl.bufq[idx].kmdvaddr);
}
static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_MEM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		}

		CAM_DBG(CAM_MEM,
			"Active buffer at idx=%d, possible leak needs unmapping",
			i);
		cam_mem_mgr_unmap_active_buf(i);

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].dma_buf) {
			dma_buf_put(tbl.bufq[i].dma_buf);
			tbl.bufq[i].dma_buf = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].dma_buf = NULL;
		tbl.bufq[i].active = false;
		tbl.bufq[i].is_internal = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}

	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}
void cam_mem_mgr_deinit(void)
{
	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
	cam_mem_mgr_cleanup_table();
	debugfs_remove_recursive(tbl.dentry);
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	tbl.dbg_buf_idx = -1;
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}
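
/*
 * Tear down a buffer in two phases: first deactivate the slot under the
 * locks so a racing unmap bails out, then undo the kernel and SMMU
 * mappings and finally scrub and free the slot.
 */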
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	/* Deactivate the buffer queue to prevent multiple unmap */
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	tbl.bufq[idx].vaddr = 0;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
				tbl.bufq[idx].kmdvaddr);
			if (rc)
				CAM_ERR(CAM_MEM,
					"Failed, dmabuf=%pK, kmdvaddr=%pK",
					tbl.bufq[idx].dma_buf,
					(void *) tbl.bufq[idx].kmdvaddr);
		}
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		if (cam_mem_util_unmap_hw_va(idx, region, client))
			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
				tbl.bufq[idx].dma_buf);
		/*
		 * Forget our dma_buf pointer for kernel mappings,
		 * presumably because the kernel unmap path above already
		 * drops the reference; this avoids a double put below.
		 */
		if (client == CAM_SMMU_MAPPING_KERNEL)
			tbl.bufq[idx].dma_buf = NULL;
	}

	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_MEM,
		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK",
		idx, tbl.bufq[idx].fd,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].dma_buf)
		dma_buf_put(tbl.bufq[idx].dma_buf);

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].is_internal = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}
int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!cmd) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
			idx);
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle 0x%x not matching within table 0x%x, idx=%d",
			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}
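
/*
 * Kernel-internal allocation helper: allocate a buffer, map it into the
 * kernel and into a single SMMU handle, and return all addresses in one
 * descriptor. KMD access is forced on so that release unmaps the kva.
 */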
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uintptr_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size, inp->flags, &buf);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else if (!buf) {
		CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	/*
	 * We always map the kva here; update the flags so that the
	 * buffer is unmapped properly on release.
	 */
	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);
	if (rc < 0) {
		CAM_ERR(CAM_MEM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	return rc;
slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	cam_mem_util_unmap_cpu_va(buf, kvaddr);
map_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);
int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);
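
/*
 * Reserve a buffer inside a dedicated SMMU region (secondary heap or FW
 * uncached). Unlike cam_mem_mgr_request_mem(), the kernel mapping here
 * is optional and the IOVA comes from the region reservation.
 */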
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	uintptr_t kvaddr = 0;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp || !out) {
		CAM_ERR(CAM_MEM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if ((region != CAM_SMMU_REGION_SECHEAP) &&
		(region != CAM_SMMU_REGION_FWUNCACHED)) {
		CAM_ERR(CAM_MEM,
			"Only secondary heap and FW uncached regions supported");
		return -EINVAL;
	}

	rc = cam_mem_util_get_dma_buf(inp->size, 0, &buf);
	if (rc) {
		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else if (!buf) {
		CAM_ERR(CAM_MEM, "ION alloc returned NULL buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
	}

	if (inp->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
		if (rc) {
			CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
			goto kmap_fail;
		}
	}

	rc = cam_smmu_reserve_buf_region(region,
		inp->smmu_hdl, buf, &iova, &request_len);
	if (rc) {
		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;
	tbl.bufq[idx].vaddr = iova;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;
slot_fail:
	cam_smmu_release_buf_region(region, smmu_hdl);
smmu_fail:
	if (region == CAM_SMMU_REGION_FWUNCACHED)
		cam_mem_util_unmap_cpu_va(buf, kvaddr);
kmap_fail:
	dma_buf_put(buf);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!atomic_read(&cam_mem_mgr_state)) {
		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
		return -EINVAL;
	}

	if (!inp) {
		CAM_ERR(CAM_MEM, "Invalid argument");
		return -EINVAL;
	}

	if ((inp->region != CAM_SMMU_REGION_SECHEAP) &&
		(inp->region != CAM_SMMU_REGION_FWUNCACHED)) {
		CAM_ERR(CAM_MEM,
			"Only secondary heap and FW uncached regions supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_MEM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_MEM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_MEM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_MEM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_MEM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_buf_region(inp->region, inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_MEM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);