/* cam_compat.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/dma-mapping.h>
  7. #include <linux/dma-buf.h>
  8. #include <linux/of_address.h>
  9. #include <linux/slab.h>
  10. #include <soc/qcom/rpmh.h>
  11. #include <soc/qcom/socinfo.h>
  12. #include "cam_compat.h"
  13. #include "cam_debug_util.h"
  14. #include "cam_cpas_api.h"
  15. #include "camera_main.h"
  16. #include "cam_eeprom_dev.h"
  17. #include "cam_eeprom_core.h"
  18. #if IS_ENABLED(CONFIG_SPECTRA_USE_RPMH_DRV_API)
  19. #define CAM_RSC_DRV_IDENTIFIER "cam_rsc"
  20. const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
  21. {
  22. const struct device *rsc_dev;
  23. rsc_dev = rpmh_get_device(CAM_RSC_DRV_IDENTIFIER, index);
  24. if (!rsc_dev) {
  25. CAM_ERR(CAM_CPAS, "Invalid dev for index: %u", index);
  26. return NULL;
  27. }
  28. return rsc_dev;
  29. }
  30. int cam_cpas_start_drv_for_dev(const struct device *dev)
  31. {
  32. int rc = 0;
  33. if (!dev) {
  34. CAM_ERR(CAM_CPAS, "Invalid dev for DRV enable");
  35. return -EINVAL;
  36. }
  37. rc = rpmh_drv_start(dev);
  38. if (rc) {
  39. CAM_ERR(CAM_CPAS, "[%s] Failed in DRV start", dev_name(dev));
  40. return rc;
  41. }
  42. return rc;
  43. }
  44. int cam_cpas_stop_drv_for_dev(const struct device *dev)
  45. {
  46. int rc = 0;
  47. if (!dev) {
  48. CAM_ERR(CAM_CPAS, "Invalid dev for DRV disable");
  49. return -EINVAL;
  50. }
  51. rc = rpmh_drv_stop(dev);
  52. if (rc) {
  53. CAM_ERR(CAM_CPAS, "[%s] Failed in DRV stop", dev_name(dev));
  54. return rc;
  55. }
  56. return rc;
  57. }
  58. int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
  59. {
  60. int rc = 0;
  61. if (!dev) {
  62. CAM_ERR(CAM_CPAS, "Invalid dev for DRV channel switch");
  63. return -EINVAL;
  64. }
  65. rc = rpmh_write_sleep_and_wake_no_child(dev);
  66. if (rc) {
  67. CAM_ERR(CAM_CPAS, "[%s] Failed in DRV channel switch", dev_name(dev));
  68. return rc;
  69. }
  70. return rc;
  71. }
  72. #else
  73. const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
  74. {
  75. return NULL;
  76. }
  77. int cam_cpas_start_drv_for_dev(const struct device *dev)
  78. {
  79. return 0;
  80. }
  81. int cam_cpas_stop_drv_for_dev(const struct device *dev)
  82. {
  83. return 0;
  84. }
  85. int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
  86. {
  87. return 0;
  88. }
  89. #endif
/*
 * Populate @csf_version with the platform's CSF (secure camera framework)
 * version. On kernels >= 6.0 the version is queried from the SMMU proxy
 * driver; on older kernels a fixed legacy version (2.0.0) is reported.
 * Returns 0 on success or the error from the proxy query.
 */
int cam_smmu_fetch_csf_version(struct cam_csf_version *csf_version)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	struct csf_version csf_ver;
	int rc;

	/* Fetch CSF version from SMMU proxy driver */
	rc = smmu_proxy_get_csf_version(&csf_ver);
	if (rc) {
		CAM_ERR(CAM_SMMU,
			"Failed to get CSF version from SMMU proxy: %d", rc);
		return rc;
	}

	/* Copy field-by-field: the proxy and camera structs are distinct types. */
	csf_version->arch_ver = csf_ver.arch_ver;
	csf_version->max_ver = csf_ver.max_ver;
	csf_version->min_ver = csf_ver.min_ver;
#else
	/* This defaults to the legacy version */
	csf_version->arch_ver = 2;
	csf_version->max_ver = 0;
	csf_version->min_ver = 0;
#endif
	return 0;
}
/*
 * Add kernel-version-specific DMA mapping attributes to @attrs.
 * On >= 6.0 the SMMU-proxy mapping attribute is required; older kernels
 * pass the attributes through unchanged.
 */
unsigned long cam_update_dma_map_attributes(unsigned long attrs)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	attrs |= DMA_ATTR_QTI_SMMU_PROXY_MAP;
#endif
	return attrs;
}
/*
 * Round @len up to the alignment required for SMMU-proxy-mapped buffers
 * on kernels >= 6.0; no-op on older kernels. Returns the aligned length.
 */
size_t cam_align_dma_buf_size(size_t len)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	len = ALIGN(len, SMMU_PROXY_MEM_ALIGNMENT);
#endif
	return len;
}
  127. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
/*
 * Map the ICP firmware carveout for @icp_fw.
 * Resolves the "memory-region" phandle on the FW device's DT node,
 * records the carveout's physical start in fw_hdl, maps fw_length bytes
 * write-combined into fw_kva, and zeroes the mapping.
 * Returns 0 on success, -ENOMEM / OF error on failure.
 */
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;
	struct device_node *of_node;
	struct device_node *mem_node;
	struct resource res;

	of_node = (icp_fw->fw_dev)->of_node;
	mem_node = of_parse_phandle(of_node, "memory-region", 0);
	if (!mem_node) {
		rc = -ENOMEM;
		CAM_ERR(CAM_SMMU, "FW memory carveout not found");
		goto end;
	}

	rc = of_address_to_resource(mem_node, 0, &res);
	/* Drop the reference taken by of_parse_phandle() regardless of rc. */
	of_node_put(mem_node);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
		goto end;
	}

	icp_fw->fw_hdl = res.start;
	/* Write-combined mapping: FW download is a streaming write. */
	icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "Failed to map the FW.");
		rc = -ENOMEM;
		goto end;
	}
	memset_io(icp_fw->fw_kva, 0, fw_length);
end:
	return rc;
}
  158. void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  159. {
  160. iounmap(icp_fw->fw_kva);
  161. }
  162. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  163. {
  164. const uint32_t smmu_se_ife = 0;
  165. uint32_t camera_hw_version, rc = 0;
  166. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  167. if (!rc) {
  168. switch (camera_hw_version) {
  169. case CAM_CPAS_TITAN_170_V100:
  170. case CAM_CPAS_TITAN_170_V110:
  171. case CAM_CPAS_TITAN_175_V100:
  172. if (qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
  173. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  174. rc = -EINVAL;
  175. }
  176. break;
  177. default:
  178. break;
  179. }
  180. }
  181. return rc;
  182. }
  183. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  184. {
  185. int reg_val;
  186. qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
  187. reg_val |= errata_wa->data.reg_info.value;
  188. qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
  189. }
  190. static int camera_platform_compare_dev(struct device *dev, const void *data)
  191. {
  192. return platform_bus_type.match(dev, (struct device_driver *) data);
  193. }
  194. static int camera_i2c_compare_dev(struct device *dev, const void *data)
  195. {
  196. return i2c_bus_type.match(dev, (struct device_driver *) data);
  197. }
  198. #else
  199. int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  200. {
  201. int rc = 0;
  202. icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
  203. &icp_fw->fw_hdl, GFP_KERNEL);
  204. if (!icp_fw->fw_kva) {
  205. CAM_ERR(CAM_SMMU, "FW memory alloc failed");
  206. rc = -ENOMEM;
  207. }
  208. return rc;
  209. }
  210. void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
  211. {
  212. dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
  213. icp_fw->fw_hdl);
  214. }
  215. int cam_ife_notify_safe_lut_scm(bool safe_trigger)
  216. {
  217. const uint32_t smmu_se_ife = 0;
  218. uint32_t camera_hw_version, rc = 0;
  219. struct scm_desc description = {
  220. .arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
  221. .args[0] = smmu_se_ife,
  222. .args[1] = safe_trigger,
  223. };
  224. rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
  225. if (!rc) {
  226. switch (camera_hw_version) {
  227. case CAM_CPAS_TITAN_170_V100:
  228. case CAM_CPAS_TITAN_170_V110:
  229. case CAM_CPAS_TITAN_175_V100:
  230. if (scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
  231. CAM_ERR(CAM_ISP, "scm call to enable safe failed");
  232. rc = -EINVAL;
  233. }
  234. break;
  235. default:
  236. break;
  237. }
  238. }
  239. return rc;
  240. }
  241. void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
  242. {
  243. int reg_val;
  244. reg_val = scm_io_read(errata_wa->data.reg_info.offset);
  245. reg_val |= errata_wa->data.reg_info.value;
  246. scm_io_write(errata_wa->data.reg_info.offset, reg_val);
  247. }
  248. static int camera_platform_compare_dev(struct device *dev, void *data)
  249. {
  250. return platform_bus_type.match(dev, (struct device_driver *) data);
  251. }
  252. static int camera_i2c_compare_dev(struct device *dev, void *data)
  253. {
  254. return i2c_bus_type.match(dev, (struct device_driver *) data);
  255. }
  256. #endif
  257. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
  258. void cam_free_clear(const void * ptr)
  259. {
  260. kfree_sensitive(ptr);
  261. }
  262. #else
  263. void cam_free_clear(const void * ptr)
  264. {
  265. kzfree(ptr);
  266. }
  267. #endif
  268. #ifdef CONFIG_CSF_2_5_SECURE_CAMERA
  269. int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
  270. bool protect, int32_t offset, bool is_shutdown)
  271. {
  272. int rc = 0;
  273. struct Object client_env, sc_object;
  274. ITCDriverSensorInfo params = {0};
  275. struct cam_csiphy_secure_info *secure_info;
  276. if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
  277. CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
  278. return -EINVAL;
  279. }
  280. if (!is_shutdown) {
  281. rc = get_client_env_object(&client_env);
  282. if (rc) {
  283. CAM_ERR(CAM_CSIPHY, "Failed getting mink env object, rc: %d", rc);
  284. return rc;
  285. }
  286. rc = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object);
  287. if (rc) {
  288. CAM_ERR(CAM_CSIPHY, "Failed getting mink sc_object, rc: %d", rc);
  289. return rc;
  290. }
  291. secure_info = &csiphy_dev->csiphy_info[offset].secure_info;
  292. params.csid_hw_idx_mask = secure_info->csid_hw_idx_mask;
  293. params.cdm_hw_idx_mask = secure_info->cdm_hw_idx_mask;
  294. params.vc_mask = secure_info->vc_mask;
  295. params.phy_lane_sel_mask =
  296. csiphy_dev->csiphy_info[offset].csiphy_phy_lane_sel_mask;
  297. params.protect = protect ? 1 : 0;
  298. rc = ITrustedCameraDriver_dynamicProtectSensor(sc_object, &params);
  299. if (rc) {
  300. CAM_ERR(CAM_CSIPHY, "Mink secure call failed, rc: %d", rc);
  301. return rc;
  302. }
  303. rc = Object_release(sc_object);
  304. if (rc) {
  305. CAM_ERR(CAM_CSIPHY, "Failed releasing secure camera object, rc: %d", rc);
  306. return rc;
  307. }
  308. rc = Object_release(client_env);
  309. if (rc) {
  310. CAM_ERR(CAM_CSIPHY, "Failed releasing mink env object, rc: %d", rc);
  311. return rc;
  312. }
  313. } else {
  314. /* This is a temporary work around until the SMC Invoke driver is
  315. * refactored to avoid the dependency on FDs, which was causing issues
  316. * during process shutdown.
  317. */
  318. rc = qcom_scm_camera_protect_phy_lanes(protect, 0);
  319. if (rc) {
  320. CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
  321. return rc;
  322. }
  323. }
  324. return 0;
  325. }
  326. #elif KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
/*
 * >= 5.4 (no CSF 2.5): toggle secure mode for one CSIPHY instance with a
 * direct hypervisor SCM call. Rejects domain-id-secured targets, which
 * need the mink path that only newer driver versions provide.
 * Returns 0 on success, -EINVAL on bad offset / SCM failure / domain-id.
 */
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset, bool __always_unused is_shutdown)
{
	int rc = 0;

	/**
	 * A check here is made if the target is using
	 * an older version of the kernel driver (< 6.0)
	 * with domain id feature present. In this case,
	 * we are to fail this call, as the new mink call
	 * is only supported on kernel driver versions 6.0
	 * and above, and the new domain id scheme is not
	 * backwards compatible with the older scheme.
	 */
	if (csiphy_dev->domain_id_security) {
		CAM_ERR(CAM_CSIPHY,
			"Domain id support not present on current kernel driver: %d",
			LINUX_VERSION_CODE);
		return -EINVAL;
	}

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (qcom_scm_camera_protect_phy_lanes(protect,
		csiphy_dev->csiphy_info[offset].csiphy_cpas_cp_reg_mask)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
  356. #else
/*
 * Pre-5.4: toggle secure mode for one CSIPHY instance via a legacy
 * scm_call2 to the hypervisor. Returns 0 on success, -EINVAL otherwise.
 *
 * NOTE(review): the scm_desc initializer indexes csiphy_info[offset]
 * before the offset bounds check below runs — harmless only if callers
 * never pass an out-of-range offset; verify against callers.
 */
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset, bool __always_unused is_shutdown)
{
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = protect,
		.args[1] = csiphy_dev->csiphy_info[offset]
			.csiphy_cpas_cp_reg_mask,
	};

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
  376. #endif
  377. int cam_update_camnoc_qos_settings(uint32_t use_case_id,
  378. uint32_t qos_cnt, struct qcom_scm_camera_qos *scm_buf)
  379. {
  380. int rc = 0;
  381. rc = qcom_scm_camera_update_camnoc_qos(use_case_id, qos_cnt, scm_buf);
  382. if (rc) {
  383. CAM_ERR(CAM_ISP, "scm call to update QoS failed: %d", rc);
  384. rc = -EINVAL;
  385. }
  386. return rc;
  387. }
  388. /* Callback to compare device from match list before adding as component */
  389. static inline int camera_component_compare_dev(struct device *dev, void *data)
  390. {
  391. return dev == data;
  392. }
  393. /* Add component matches to list for master of aggregate driver */
/*
 * Add component matches to list for master of aggregate driver.
 *
 * Walks every driver in cam_component_platform_drivers and
 * cam_component_i2c_drivers, finds each bound device on the respective
 * bus, and registers it in @match_list via component_match_add().
 * bus_find_device() takes a reference on each returned device and drops
 * the previous cursor's reference via put_device().
 * Returns 0 on success, -EINVAL on NULL arguments.
 */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;
	struct i2c_client *client = NULL;
	struct device *start_dev = NULL, *match_dev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_platform_drivers); i++) {
		/* bus_find_device()'s data parameter is const only on >= 5.4. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_platform_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_platform_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&platform_bus_type,
			start_dev, drv_ptr, &camera_platform_compare_dev))) {
			/* Drop the ref on the previous cursor device. */
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		/* Release the final cursor once the bus walk ends. */
		put_device(start_dev);
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_i2c_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_i2c_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&i2c_bus_type,
			start_dev, drv_ptr, &camera_i2c_compare_dev))) {
			put_device(start_dev);
			client = to_i2c_client(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", client->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}
end:
	return rc;
}
  451. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
  452. #include <linux/qcom-iommu-util.h>
  453. void cam_check_iommu_faults(struct iommu_domain *domain,
  454. struct cam_smmu_pf_info *pf_info)
  455. {
  456. struct qcom_iommu_fault_ids fault_ids = {0, 0, 0};
  457. if (qcom_iommu_get_fault_ids(domain, &fault_ids))
  458. CAM_ERR(CAM_SMMU, "Cannot get smmu fault ids");
  459. else
  460. CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
  461. fault_ids.bid, fault_ids.pid, fault_ids.mid);
  462. pf_info->bid = fault_ids.bid;
  463. pf_info->pid = fault_ids.pid;
  464. pf_info->mid = fault_ids.mid;
  465. }
  466. #else
  467. void cam_check_iommu_faults(struct iommu_domain *domain,
  468. struct cam_smmu_pf_info *pf_info)
  469. {
  470. struct iommu_fault_ids fault_ids = {0, 0, 0};
  471. if (iommu_get_fault_ids(domain, &fault_ids))
  472. CAM_ERR(CAM_SMMU, "Error: Can not get smmu fault ids");
  473. CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
  474. fault_ids.bid, fault_ids.pid, fault_ids.mid);
  475. pf_info->bid = fault_ids.bid;
  476. pf_info->pid = fault_ids.pid;
  477. pf_info->mid = fault_ids.mid;
  478. }
  479. #endif
  480. static int inline cam_subdev_list_cmp(struct cam_subdev *entry_1, struct cam_subdev *entry_2)
  481. {
  482. if (entry_1->close_seq_prior > entry_2->close_seq_prior)
  483. return 1;
  484. else if (entry_1->close_seq_prior < entry_2->close_seq_prior)
  485. return -1;
  486. else
  487. return 0;
  488. }
  489. #if (KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE)
/*
 * >= 5.18: vmap @dmabuf and return its kernel VA in @vaddr (0 on failure).
 * The iosys_map result may be I/O or normal memory; either address is
 * returned as a uintptr_t. Returns dma_buf_vmap()'s error code.
 */
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	struct iosys_map mapping = {0};
	int error_code = dma_buf_vmap(dmabuf, &mapping);

	if (error_code) {
		*vaddr = 0;
	} else {
		*vaddr = (mapping.is_iomem) ?
			(uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
		/* NOTE(review): *vaddr is a uintptr_t printed with %p — same
		 * width as a pointer on supported targets, but strictly a
		 * format mismatch; consider casting to (void *).
		 */
		CAM_DBG(CAM_MEM,
			"dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
			dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
	}

	return error_code;
}
  505. void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
  506. {
  507. struct iosys_map mapping = IOSYS_MAP_INIT_VADDR(vaddr);
  508. dma_buf_vunmap(dmabuf, &mapping);
  509. }
  510. #elif (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
/*
 * 5.15–5.17: vmap @dmabuf and return its kernel VA in @vaddr (0 on
 * failure), using the older dma_buf_map type. Returns dma_buf_vmap()'s
 * error code.
 */
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	struct dma_buf_map mapping;
	int error_code = dma_buf_vmap(dmabuf, &mapping);

	if (error_code) {
		*vaddr = 0;
	} else {
		*vaddr = (mapping.is_iomem) ?
			(uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
		/* NOTE(review): *vaddr is a uintptr_t printed with %p — same
		 * width as a pointer on supported targets, but strictly a
		 * format mismatch; consider casting to (void *).
		 */
		CAM_DBG(CAM_MEM,
			"dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
			dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
	}

	return error_code;
}
  526. void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
  527. {
  528. struct dma_buf_map mapping = DMA_BUF_MAP_INIT_VADDR(vaddr);
  529. dma_buf_vunmap(dmabuf, &mapping);
  530. }
  531. #else
  532. int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
  533. {
  534. int error_code = 0;
  535. void *addr = dma_buf_vmap(dmabuf);
  536. if (!addr) {
  537. *vaddr = 0;
  538. error_code = -ENOSPC;
  539. } else {
  540. *vaddr = (uintptr_t)addr;
  541. }
  542. return error_code;
  543. }
  544. void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
  545. {
  546. dma_buf_vunmap(dmabuf, vaddr);
  547. }
  548. #endif
  549. #if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
  550. void cam_smmu_util_iommu_custom(struct device *dev,
  551. dma_addr_t discard_start, size_t discard_length)
  552. {
  553. }
  554. int cam_req_mgr_ordered_list_cmp(void *priv,
  555. const struct list_head *head_1, const struct list_head *head_2)
  556. {
  557. return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
  558. list_entry(head_2, struct cam_subdev, list));
  559. }
  560. void cam_i3c_driver_remove(struct i3c_device *client)
  561. {
  562. CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
  563. (client ? dev_name(&client->dev) : "none"));
  564. }
  565. #else
  566. void cam_smmu_util_iommu_custom(struct device *dev,
  567. dma_addr_t discard_start, size_t discard_length)
  568. {
  569. iommu_dma_enable_best_fit_algo(dev);
  570. if (discard_start)
  571. iommu_dma_reserve_iova(dev, discard_start, discard_length);
  572. return;
  573. }
  574. int cam_req_mgr_ordered_list_cmp(void *priv,
  575. struct list_head *head_1, struct list_head *head_2)
  576. {
  577. return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
  578. list_entry(head_2, struct cam_subdev, list));
  579. }
  580. int cam_i3c_driver_remove(struct i3c_device *client)
  581. {
  582. CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
  583. (client ? dev_name(&client->dev) : "none"));
  584. return 0;
  585. }
  586. #endif
  587. #if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE && \
  588. KERNEL_VERSION(6, 6, 0) > LINUX_VERSION_CODE)
  589. long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
  590. {
  591. long ret = 0;
  592. ret = dma_buf_set_name(dmabuf, name);
  593. return ret;
  594. }
  595. #else
  596. long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
  597. {
  598. return 0;
  599. }
  600. #endif
  601. #if KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE
/*
 * >= 5.18 SPI remove callback (void-returning) for the EEPROM subdev:
 * shuts the device down, unregisters the v4l2 subdev, and frees the SPI
 * client, SoC private data, and control structure — in that order, so
 * nothing is freed while still reachable through the subdev.
 */
void cam_eeprom_spi_driver_remove(struct spi_device *sdev)
{
	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
	struct cam_eeprom_ctrl_t *e_ctrl;
	struct cam_eeprom_soc_private *soc_private;

	if (!sd) {
		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
		return;
	}

	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
	if (!e_ctrl) {
		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
		return;
	}

	/* Quiesce the device under its lock before tearing anything down. */
	mutex_lock(&(e_ctrl->eeprom_mutex));
	cam_eeprom_shutdown(e_ctrl);
	mutex_unlock(&(e_ctrl->eeprom_mutex));
	mutex_destroy(&(e_ctrl->eeprom_mutex));
	cam_unregister_subdev(&(e_ctrl->v4l2_dev_str));
	kfree(e_ctrl->io_master_info.spi_client);
	e_ctrl->io_master_info.spi_client = NULL;
	soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	if (soc_private) {
		kfree(soc_private->power_info.gpio_num_info);
		soc_private->power_info.gpio_num_info = NULL;
		kfree(soc_private);
		soc_private = NULL;
	}
	/* Clear the subdev back-pointer before freeing its owner. */
	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
	kfree(e_ctrl);
}
  634. int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info)
  635. {
  636. int rc = 0;
  637. soc_info->irq_num[0] = platform_get_irq(soc_info->pdev, 0);
  638. if (soc_info->irq_num[0] < 0) {
  639. rc = soc_info->irq_num[0];
  640. return rc;
  641. }
  642. return rc;
  643. }
  644. #else
/*
 * Pre-5.18 SPI remove callback (int-returning) for the EEPROM subdev.
 * Same teardown sequence as the >= 5.18 variant; returns 0 on success
 * or -EINVAL if the subdev/control structure is missing.
 */
int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
{
	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
	struct cam_eeprom_ctrl_t *e_ctrl;
	struct cam_eeprom_soc_private *soc_private;
	struct cam_hw_soc_info *soc_info;

	if (!sd) {
		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
		return -EINVAL;
	}

	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
	if (!e_ctrl) {
		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
		return -EINVAL;
	}

	soc_info = &e_ctrl->soc_info;
	/* Quiesce the device under its lock before tearing anything down. */
	mutex_lock(&(e_ctrl->eeprom_mutex));
	cam_eeprom_shutdown(e_ctrl);
	mutex_unlock(&(e_ctrl->eeprom_mutex));
	mutex_destroy(&(e_ctrl->eeprom_mutex));
	cam_unregister_subdev(&(e_ctrl->v4l2_dev_str));
	kfree(e_ctrl->io_master_info.spi_client);
	e_ctrl->io_master_info.spi_client = NULL;
	soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	if (soc_private) {
		kfree(soc_private->power_info.gpio_num_info);
		soc_private->power_info.gpio_num_info = NULL;
		kfree(soc_private);
		soc_private = NULL;
	}
	/* Clear the subdev back-pointer before freeing its owner. */
	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
	kfree(e_ctrl);
	return 0;
}
/*
 * Pre-5.18: resolve every named IRQ resource of the platform device into
 * irq_line[]/irq_num[]. Returns 0 on success, -ENODEV if any named IRQ
 * is missing.
 */
int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info)
{
	int rc = 0, i;

	for (i = 0; i < soc_info->irq_count; i++) {
		soc_info->irq_line[i] = platform_get_resource_byname(soc_info->pdev,
			IORESOURCE_IRQ, soc_info->irq_name[i]);
		if (!soc_info->irq_line[i]) {
			CAM_ERR(CAM_UTIL, "Failed to get IRQ line for irq: %s of %s",
				soc_info->irq_name[i], soc_info->dev_name);
			rc = -ENODEV;
			return rc;
		}
		/* Resource start is the IRQ number on this kernel range. */
		soc_info->irq_num[i] = soc_info->irq_line[i]->start;
	}

	return rc;
}
  696. #endif
/*
 * Whether the secure VFE FD port configuration applies: disabled on
 * kernels >= 6.0, enabled on older kernels.
 */
bool cam_secure_get_vfe_fd_port_config(void)
{
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
	return false;
#else
	return true;
#endif
}
  705. #if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE
  706. int cam_get_subpart_info(uint32_t *part_info, uint32_t max_num_cam)
  707. {
  708. int rc = 0;
  709. int num_cam;
  710. num_cam = socinfo_get_part_count(PART_CAMERA);
  711. if (num_cam != max_num_cam) {
  712. CAM_ERR(CAM_CPAS, "Unsupported number of parts: %d", num_cam);
  713. return -EINVAL;
  714. }
  715. /*
  716. * If bit value in part_info is "0" then HW is available.
  717. * If bit value in part_info is "1" then HW is unavailable.
  718. */
  719. rc = socinfo_get_subpart_info(PART_CAMERA, part_info, num_cam);
  720. if (rc) {
  721. CAM_ERR(CAM_CPAS, "Failed while getting subpart_info, rc = %d.", rc);
  722. return rc;
  723. }
  724. return 0;
  725. }
  726. #else
  727. int cam_get_subpart_info(uint32_t *part_info, uint32_t max_num_cam)
  728. {
  729. return 0;
  730. }
  731. #endif