/* cam_compat.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/dma-mapping.h>
  7. #include <linux/dma-buf.h>
  8. #include <linux/of_address.h>
  9. #include <linux/slab.h>
  10. #include <soc/qcom/rpmh.h>
  11. #include "cam_compat.h"
  12. #include "cam_debug_util.h"
  13. #include "cam_cpas_api.h"
  14. #include "camera_main.h"
  15. #include "cam_eeprom_dev.h"
  16. #include "cam_eeprom_core.h"
#if IS_ENABLED(CONFIG_SPECTRA_USE_RPMH_DRV_API)
#define CAM_RSC_DRV_IDENTIFIER "cam_rsc"

/*
 * Return the RPMh RSC device registered under "cam_rsc" for the given
 * DRV index, or NULL (with an error log) if no such device exists.
 */
const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
{
	const struct device *rsc_dev;

	rsc_dev = rpmh_get_device(CAM_RSC_DRV_IDENTIFIER, index);
	if (!rsc_dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for index: %u", index);
		return NULL;
	}

	return rsc_dev;
}

/* Start the DRV on the given RSC device. Returns 0 or rpmh error code. */
int cam_cpas_start_drv_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV enable");
		return -EINVAL;
	}

	rc = rpmh_drv_start(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV start", dev_name(dev));
		return rc;
	}

	return rc;
}

/* Stop the DRV on the given RSC device. Returns 0 or rpmh error code. */
int cam_cpas_stop_drv_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV disable");
		return -EINVAL;
	}

	rc = rpmh_drv_stop(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV stop", dev_name(dev));
		return rc;
	}

	return rc;
}

/*
 * Flush sleep/wake votes for the device (no child RSCs), used when
 * switching DRV channels. Returns 0 or rpmh error code.
 */
int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV channel switch");
		return -EINVAL;
	}

	rc = rpmh_write_sleep_and_wake_no_child(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV channel switch", dev_name(dev));
		return rc;
	}

	return rc;
}
#else
/* Stubs when the RPMh DRV API is not compiled in: DRV is a no-op. */
const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
{
	return NULL;
}

int cam_cpas_start_drv_for_dev(const struct device *dev)
{
	return 0;
}

int cam_cpas_stop_drv_for_dev(const struct device *dev)
{
	return 0;
}

int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
{
	return 0;
}
#endif
/*
 * Fill @csf_version with the secure camera framework (CSF) version.
 * On 6.0+ kernels this is queried from the SMMU proxy driver; on older
 * kernels a fixed legacy version (2.0.0) is reported. Returns 0 on
 * success or the proxy driver's error code.
 */
int cam_smmu_fetch_csf_version(struct cam_csf_version *csf_version)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	struct csf_version csf_ver;
	int rc;

	/* Fetch CSF version from SMMU proxy driver */
	rc = smmu_proxy_get_csf_version(&csf_ver);
	if (rc) {
		CAM_ERR(CAM_SMMU,
			"Failed to get CSF version from SMMU proxy: %d", rc);
		return rc;
	}

	csf_version->arch_ver = csf_ver.arch_ver;
	csf_version->max_ver = csf_ver.max_ver;
	csf_version->min_ver = csf_ver.min_ver;
#else
	/* This defaults to the legacy version */
	csf_version->arch_ver = 2;
	csf_version->max_ver = 0;
	csf_version->min_ver = 0;
#endif
	return 0;
}
/*
 * Add kernel-version-specific DMA mapping attributes. On 6.0+ the SMMU
 * proxy mapping attribute is required; older kernels pass attrs through.
 */
unsigned long cam_update_dma_map_attributes(unsigned long attrs)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	attrs |= DMA_ATTR_QTI_SMMU_PROXY_MAP;
#endif
	return attrs;
}
/*
 * Round a dma-buf length up to the SMMU proxy alignment on 6.0+
 * kernels; older kernels need no extra alignment.
 */
size_t cam_align_dma_buf_size(size_t len)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	len = ALIGN(len, SMMU_PROXY_MEM_ALIGNMENT);
#endif
	return len;
}
  126. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
/*
 * Reserve the ICP firmware carveout described by the FW device's
 * "memory-region" phandle and map it write-combined for CPU access.
 * On success icp_fw->fw_hdl holds the physical base and icp_fw->fw_kva
 * the zeroed kernel mapping. Returns 0 or a negative errno.
 */
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;
	struct device_node *of_node;
	struct device_node *mem_node;
	struct resource res;

	of_node = (icp_fw->fw_dev)->of_node;
	mem_node = of_parse_phandle(of_node, "memory-region", 0);
	if (!mem_node) {
		rc = -ENOMEM;
		CAM_ERR(CAM_SMMU, "FW memory carveout not found");
		goto end;
	}
	rc = of_address_to_resource(mem_node, 0, &res);
	/* node reference is only needed for the resource lookup */
	of_node_put(mem_node);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
		goto end;
	}
	icp_fw->fw_hdl = res.start;
	icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "Failed to map the FW.");
		rc = -ENOMEM;
		goto end;
	}
	memset_io(icp_fw->fw_kva, 0, fw_length);
end:
	return rc;
}
/* Undo cam_reserve_icp_fw(): drop the WC mapping of the FW carveout. */
void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	iounmap(icp_fw->fw_kva);
}
  161. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  162. {
  163. const uint32_t smmu_se_ife = 0;
  164. uint32_t camera_hw_version, rc = 0;
  165. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  166. if (!rc) {
  167. switch (camera_hw_version) {
  168. case CAM_CPAS_TITAN_170_V100:
  169. case CAM_CPAS_TITAN_170_V110:
  170. case CAM_CPAS_TITAN_175_V100:
  171. if (qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
  172. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  173. rc = -EINVAL;
  174. }
  175. break;
  176. default:
  177. break;
  178. }
  179. }
  180. return rc;
  181. }
  182. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  183. {
  184. int reg_val;
  185. qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
  186. reg_val |= errata_wa->data.reg_info.value;
  187. qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
  188. }
/*
 * bus_find_device() callbacks (>= 5.4 signature with const data):
 * non-zero when @dev matches the driver passed through @data.
 */
static int camera_platform_compare_dev(struct device *dev, const void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

static int camera_i2c_compare_dev(struct device *dev, const void *data)
{
	return i2c_bus_type.match(dev, (struct device_driver *) data);
}
  197. #else
/*
 * Legacy (< 5.4) ICP FW reservation: no carveout, allocate coherent
 * DMA memory instead. Returns 0 or -ENOMEM.
 */
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;

	icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
		&icp_fw->fw_hdl, GFP_KERNEL);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
		rc = -ENOMEM;
	}

	return rc;
}
/* Release the coherent DMA buffer allocated by cam_reserve_icp_fw(). */
void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
		icp_fw->fw_hdl);
}
  214. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  215. {
  216. const uint32_t smmu_se_ife = 0;
  217. uint32_t camera_hw_version, rc = 0;
  218. struct scm_desc description = {
  219. .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
  220. .args[0] = smmu_se_ife,
  221. .args[1] = safe_trigger,
  222. };
  223. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  224. if (!rc) {
  225. switch (camera_hw_version) {
  226. case CAM_CPAS_TITAN_170_V100:
  227. case CAM_CPAS_TITAN_170_V110:
  228. case CAM_CPAS_TITAN_175_V100:
  229. if (scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
  230. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  231. rc = -EINVAL;
  232. }
  233. break;
  234. default:
  235. break;
  236. }
  237. }
  238. return rc;
  239. }
/*
 * Legacy (< 5.4) errata workaround: read-modify-write a secure register
 * through the old scm_io_read()/scm_io_write() interface.
 */
void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	int reg_val;

	reg_val = scm_io_read(errata_wa->data.reg_info.offset);
	reg_val |= errata_wa->data.reg_info.value;
	scm_io_write(errata_wa->data.reg_info.offset, reg_val);
}
/*
 * bus_find_device() callbacks (pre-5.4 signature with non-const data):
 * non-zero when @dev matches the driver passed through @data.
 */
static int camera_platform_compare_dev(struct device *dev, void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

static int camera_i2c_compare_dev(struct device *dev, void *data)
{
	return i2c_bus_type.match(dev, (struct device_driver *) data);
}
  255. #endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
/* Zeroize and free a buffer holding sensitive data. */
void cam_free_clear(const void * ptr)
{
	kfree_sensitive(ptr);
}
#else
/* kzfree() was renamed kfree_sensitive() in 5.10; same semantics. */
void cam_free_clear(const void * ptr)
{
	kzfree(ptr);
}
#endif
/*
 * The Mink (SMC-invoke) secure camera interface is only used by this
 * driver on 6.0+ kernels; older builds fall back to direct SCM calls.
 */
bool cam_is_mink_api_available(void)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	return true;
#else
	return false;
#endif
}
  275. #if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
  276. int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
  277. bool protect, int32_t offset, bool is_shutdown)
  278. {
  279. int rc = 0;
  280. struct Object client_env, sc_object;
  281. ITCDriverSensorInfo params = {0};
  282. struct cam_csiphy_secure_info *secure_info;
  283. if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
  284. CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
  285. return -EINVAL;
  286. }
  287. if (!is_shutdown) {
  288. rc = get_client_env_object(&client_env);
  289. if (rc) {
  290. CAM_ERR(CAM_CSIPHY, "Failed getting mink env object, rc: %d", rc);
  291. return rc;
  292. }
  293. rc = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object);
  294. if (rc) {
  295. CAM_ERR(CAM_CSIPHY, "Failed getting mink sc_object, rc: %d", rc);
  296. return rc;
  297. }
  298. secure_info = &csiphy_dev->csiphy_info[offset].secure_info;
  299. params.csid_hw_idx_mask = secure_info->csid_hw_idx_mask;
  300. params.cdm_hw_idx_mask = secure_info->cdm_hw_idx_mask;
  301. params.vc_mask = secure_info->vc_mask;
  302. params.phy_lane_sel_mask =
  303. csiphy_dev->csiphy_info[offset].csiphy_phy_lane_sel_mask;
  304. params.protect = protect ? 1 : 0;
  305. rc = ITrustedCameraDriver_dynamicProtectSensor(sc_object, &params);
  306. if (rc) {
  307. CAM_ERR(CAM_CSIPHY, "Mink secure call failed, rc: %d", rc);
  308. return rc;
  309. }
  310. rc = Object_release(sc_object);
  311. if (rc) {
  312. CAM_ERR(CAM_CSIPHY, "Failed releasing secure camera object, rc: %d", rc);
  313. return rc;
  314. }
  315. rc = Object_release(client_env);
  316. if (rc) {
  317. CAM_ERR(CAM_CSIPHY, "Failed releasing mink env object, rc: %d", rc);
  318. return rc;
  319. }
  320. } else {
  321. /* This is a temporary work around until the SMC Invoke driver is
  322. * refactored to avoid the dependency on FDs, which was causing issues
  323. * during process shutdown.
  324. */
  325. rc = qcom_scm_camera_protect_phy_lanes(protect, 0);
  326. if (rc) {
  327. CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
  328. return rc;
  329. }
  330. }
  331. return 0;
  332. }
  333. #elif KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
/*
 * 5.4–5.x variant: toggle CSIPHY secure mode with a direct SCM call.
 * Domain-id based security is rejected here because it requires the
 * 6.0+ Mink interface. Returns 0 on success or -EINVAL.
 */
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;

	/**
	 * A check here is made if the target is using
	 * an older version of the kernel driver (< 6.0)
	 * with domain id feature present. In this case,
	 * we are to fail this call, as the new mink call
	 * is only supported on kernel driver versions 6.0
	 * and above, and the new domain id scheme is not
	 * backwards compatible with the older scheme.
	 */
	if (csiphy_dev->domain_id_security) {
		CAM_ERR(CAM_CSIPHY,
			"Domain id support not present on current kernel driver: %d",
			LINUX_VERSION_CODE);
		return -EINVAL;
	}

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (qcom_scm_camera_protect_phy_lanes(protect,
		csiphy_dev->csiphy_info[offset].csiphy_cpas_cp_reg_mask)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
  363. #else
/*
 * Legacy (< 5.4) variant: toggle CSIPHY secure mode through the old
 * scm_call2() interface. Returns 0 on success or -EINVAL.
 */
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;
	/*
	 * NOTE(review): description reads csiphy_info[offset] before the
	 * offset bounds check below — safe only if callers never pass an
	 * out-of-range offset; confirm against call sites.
	 */
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = protect,
		.args[1] = csiphy_dev->csiphy_info[offset]
			.csiphy_cpas_cp_reg_mask,
	};

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
  383. #endif
/* Callback to compare device from match list before adding as component:
 * matches by pointer identity with the device captured at match time.
 */
static inline int camera_component_compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
/* Add component matches to list for master of aggregate driver:
 * walk the platform and i2c buses, and for every device bound to one of
 * the camera component drivers, register it as a component match on
 * @master_dev. bus_find_device() returns a referenced device; the
 * reference of the previous iteration's device is dropped with
 * put_device() once it is used as the new search start point.
 * Returns 0, or -EINVAL on bad arguments.
 */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;
	struct i2c_client *client = NULL;
	struct device *start_dev = NULL, *match_dev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_platform_drivers); i++) {
/* bus_find_device() takes const data on >= 5.4, non-const before */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_platform_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_platform_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&platform_bus_type,
			start_dev, drv_ptr, &camera_platform_compare_dev))) {
			/* drop ref of previous iteration's device */
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_i2c_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_i2c_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&i2c_bus_type,
			start_dev, drv_ptr, &camera_i2c_compare_dev))) {
			put_device(start_dev);
			client = to_i2c_client(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", client->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}
end:
	return rc;
}
  447. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
  448. #include <linux/qcom-iommu-util.h>
/*
 * Query the SMMU for the bus/pipeline/master IDs of the last page
 * fault and copy them into @pf_info. If the query fails the IDs are
 * reported as zero.
 */
void cam_check_iommu_faults(struct iommu_domain *domain,
	struct cam_smmu_pf_info *pf_info)
{
	struct qcom_iommu_fault_ids fault_ids = {0, 0, 0};

	if (qcom_iommu_get_fault_ids(domain, &fault_ids))
		CAM_ERR(CAM_SMMU, "Cannot get smmu fault ids");
	else
		CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
			fault_ids.bid, fault_ids.pid, fault_ids.mid);

	pf_info->bid = fault_ids.bid;
	pf_info->pid = fault_ids.pid;
	pf_info->mid = fault_ids.mid;
}
  462. #else
  463. void cam_check_iommu_faults(struct iommu_domain *domain,
  464. struct cam_smmu_pf_info *pf_info)
  465. {
  466. struct iommu_fault_ids fault_ids = {0, 0, 0};
  467. if (iommu_get_fault_ids(domain, &fault_ids))
  468. CAM_ERR(CAM_SMMU, "Error: Can not get smmu fault ids");
  469. CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
  470. fault_ids.bid, fault_ids.pid, fault_ids.mid);
  471. pf_info->bid = fault_ids.bid;
  472. pf_info->pid = fault_ids.pid;
  473. pf_info->mid = fault_ids.mid;
  474. }
  475. #endif
  476. static int inline cam_subdev_list_cmp(struct cam_subdev *entry_1, struct cam_subdev *entry_2)
  477. {
  478. if (entry_1->close_seq_prior > entry_2->close_seq_prior)
  479. return 1;
  480. else if (entry_1->close_seq_prior < entry_2->close_seq_prior)
  481. return -1;
  482. else
  483. return 0;
  484. }
  485. #if (KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE)
  486. int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
  487. {
  488. struct iosys_map mapping;
  489. int error_code = dma_buf_vmap(dmabuf, &mapping);
  490. if (error_code) {
  491. *vaddr = 0;
  492. } else {
  493. *vaddr = (mapping.is_iomem) ?
  494. (uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
  495. CAM_DBG(CAM_MEM,
  496. "dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
  497. dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
  498. }
  499. return error_code;
  500. }
/* Unmap a dma-buf kernel mapping obtained via the iosys_map API. */
void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	struct iosys_map mapping = IOSYS_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dmabuf, &mapping);
}
  506. #elif (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
  507. int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
  508. {
  509. struct dma_buf_map mapping;
  510. int error_code = dma_buf_vmap(dmabuf, &mapping);
  511. if (error_code) {
  512. *vaddr = 0;
  513. } else {
  514. *vaddr = (mapping.is_iomem) ?
  515. (uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
  516. CAM_DBG(CAM_MEM,
  517. "dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
  518. dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
  519. }
  520. return error_code;
  521. }
/* Unmap a dma-buf kernel mapping obtained via the dma_buf_map API. */
void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	struct dma_buf_map mapping = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dmabuf, &mapping);
}
  527. #else
/*
 * Legacy (< 5.15) dma-buf kernel mapping: dma_buf_vmap() returns the
 * address directly. *vaddr is 0 and -ENOSPC is returned on failure.
 */
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	int error_code = 0;
	void *addr = dma_buf_vmap(dmabuf);

	if (!addr) {
		*vaddr = 0;
		error_code = -ENOSPC;
	} else {
		*vaddr = (uintptr_t)addr;
	}

	return error_code;
}
/* Legacy (< 5.15) unmap: dma_buf_vunmap() takes the raw address. */
void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	dma_buf_vunmap(dmabuf, vaddr);
}
  544. #endif
  545. #if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
/*
 * No-op on 5.15+: the best-fit IOVA / reserve hooks used by the legacy
 * variant no longer exist in upstream IOMMU DMA code.
 */
void cam_smmu_util_iommu_custom(struct device *dev,
	dma_addr_t discard_start, size_t discard_length)
{
}
/*
 * list_sort() comparator (5.15+ const list_head signature): orders
 * subdev entries by close-sequence priority.
 */
int cam_req_mgr_ordered_list_cmp(void *priv,
	const struct list_head *head_1, const struct list_head *head_2)
{
	return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
		list_entry(head_2, struct cam_subdev, list));
}
/* I3C remove callback (void return on 5.15+); only logs the event. */
void cam_i3c_driver_remove(struct i3c_device *client)
{
	CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
		(client ? dev_name(&client->dev) : "none"));
}
  561. #else
/*
 * Legacy (< 5.15) IOMMU tuning: enable the best-fit IOVA allocator and
 * optionally reserve a discard IOVA range for the device.
 */
void cam_smmu_util_iommu_custom(struct device *dev,
	dma_addr_t discard_start, size_t discard_length)
{
	iommu_dma_enable_best_fit_algo(dev);

	if (discard_start)
		iommu_dma_reserve_iova(dev, discard_start, discard_length);

	return;
}
/*
 * list_sort() comparator (pre-5.15 non-const signature): orders subdev
 * entries by close-sequence priority.
 */
int cam_req_mgr_ordered_list_cmp(void *priv,
	struct list_head *head_1, struct list_head *head_2)
{
	return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
		list_entry(head_2, struct cam_subdev, list));
}
/* I3C remove callback (int return before 5.15); logs and returns 0. */
int cam_i3c_driver_remove(struct i3c_device *client)
{
	CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
		(client ? dev_name(&client->dev) : "none"));
	return 0;
}
  582. #endif
  583. #if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE && \
  584. KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE)
  585. long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
  586. {
  587. long ret = 0;
  588. ret = dma_buf_set_name(dmabuf, name);
  589. return ret;
  590. }
  591. #else
/* dma_buf_set_name() is not used outside [5.15, 5.18); no-op stub. */
long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	return 0;
}
  596. #endif
  597. #if KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE
/*
 * SPI remove callback (void return on 5.18+): shut the EEPROM device
 * down, unregister its v4l2 subdev and free all driver-private state
 * (spi client, power/gpio info, soc private data, control struct).
 */
void cam_eeprom_spi_driver_remove(struct spi_device *sdev)
{
	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
	struct cam_eeprom_ctrl_t *e_ctrl;
	struct cam_eeprom_soc_private *soc_private;

	if (!sd) {
		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
		return;
	}

	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
	if (!e_ctrl) {
		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
		return;
	}

	mutex_lock(&(e_ctrl->eeprom_mutex));
	cam_eeprom_shutdown(e_ctrl);
	mutex_unlock(&(e_ctrl->eeprom_mutex));
	mutex_destroy(&(e_ctrl->eeprom_mutex));
	cam_unregister_subdev(&(e_ctrl->v4l2_dev_str));
	kfree(e_ctrl->io_master_info.spi_client);
	e_ctrl->io_master_info.spi_client = NULL;
	soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	if (soc_private) {
		kfree(soc_private->power_info.gpio_num_info);
		soc_private->power_info.gpio_num_info = NULL;
		kfree(soc_private);
		soc_private = NULL;
	}
	/* detach private data before freeing it */
	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
	kfree(e_ctrl);
}
  630. int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info)
  631. {
  632. int rc = 0;
  633. soc_info->irq_num[0] = platform_get_irq(soc_info->pdev, 0);
  634. if (soc_info->irq_num[0] < 0) {
  635. rc = soc_info->irq_num[0];
  636. return rc;
  637. }
  638. return rc;
  639. }
  640. #else
/*
 * SPI remove callback (int return before 5.18): shut the EEPROM device
 * down, unregister its v4l2 subdev and free all driver-private state.
 * Returns 0, or -EINVAL if the subdev/control struct is missing.
 */
int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
{
	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
	struct cam_eeprom_ctrl_t *e_ctrl;
	struct cam_eeprom_soc_private *soc_private;
	struct cam_hw_soc_info *soc_info;

	if (!sd) {
		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
		return -EINVAL;
	}

	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
	if (!e_ctrl) {
		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
		return -EINVAL;
	}

	soc_info = &e_ctrl->soc_info;
	mutex_lock(&(e_ctrl->eeprom_mutex));
	cam_eeprom_shutdown(e_ctrl);
	mutex_unlock(&(e_ctrl->eeprom_mutex));
	mutex_destroy(&(e_ctrl->eeprom_mutex));
	cam_unregister_subdev(&(e_ctrl->v4l2_dev_str));
	kfree(e_ctrl->io_master_info.spi_client);
	e_ctrl->io_master_info.spi_client = NULL;
	soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	if (soc_private) {
		kfree(soc_private->power_info.gpio_num_info);
		soc_private->power_info.gpio_num_info = NULL;
		kfree(soc_private);
		soc_private = NULL;
	}
	/* detach private data before freeing it */
	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
	kfree(e_ctrl);
	return 0;
}
/*
 * Legacy (< 5.18) IRQ lookup: resolve every named IRQ resource of the
 * platform device and cache its number in irq_num[]. Returns 0, or
 * -ENODEV when a named line is missing.
 */
int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info)
{
	int rc = 0, i;

	for (i = 0; i < soc_info->irq_count; i++) {
		soc_info->irq_line[i] = platform_get_resource_byname(soc_info->pdev,
			IORESOURCE_IRQ, soc_info->irq_name[i]);
		if (!soc_info->irq_line[i]) {
			CAM_ERR(CAM_UTIL, "Failed to get IRQ line for irq: %s of %s",
				soc_info->irq_name[i], soc_info->dev_name);
			rc = -ENODEV;
			return rc;
		}
		soc_info->irq_num[i] = soc_info->irq_line[i]->start;
	}

	return rc;
}
  692. #endif
/*
 * Whether the VFE FD port still needs explicit secure configuration:
 * false on 6.0+ kernels, true on older ones — presumably because the
 * 6.0+ secure camera path covers it; confirm against the secure-mode
 * call sites.
 */
bool cam_secure_get_vfe_fd_port_config(void)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	return false;
#else
	return true;
#endif
}