cam_compat.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <soc/qcom/rpmh.h>

#include "cam_compat.h"
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
#include "camera_main.h"
#include "cam_eeprom_dev.h"
#include "cam_eeprom_core.h"
#if IS_ENABLED(CONFIG_USE_RPMH_DRV_API)
#define CAM_RSC_DRV_IDENTIFIER "cam_rsc"

const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
{
	const struct device *rsc_dev;

	rsc_dev = rpmh_get_device(CAM_RSC_DRV_IDENTIFIER, index);
	if (!rsc_dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for index: %u", index);
		return NULL;
	}

	return rsc_dev;
}

int cam_cpas_start_drv_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV enable");
		return -EINVAL;
	}

	rc = rpmh_drv_start(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV start", dev_name(dev));
		return rc;
	}

	return rc;
}

int cam_cpas_stop_drv_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV disable");
		return -EINVAL;
	}

	rc = rpmh_drv_stop(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV stop", dev_name(dev));
		return rc;
	}

	return rc;
}

int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
{
	int rc = 0;

	if (!dev) {
		CAM_ERR(CAM_CPAS, "Invalid dev for DRV channel switch");
		return -EINVAL;
	}

	rc = rpmh_write_sleep_and_wake_no_child(dev);
	if (rc) {
		CAM_ERR(CAM_CPAS, "[%s] Failed in DRV channel switch", dev_name(dev));
		return rc;
	}

	return rc;
}

#else
const struct device *cam_cpas_get_rsc_dev_for_drv(uint32_t index)
{
	return NULL;
}

int cam_cpas_start_drv_for_dev(const struct device *dev)
{
	return 0;
}

int cam_cpas_stop_drv_for_dev(const struct device *dev)
{
	return 0;
}

int cam_cpas_drv_channel_switch_for_dev(const struct device *dev)
{
	return 0;
}
#endif
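
/*
 * ICP firmware carveout mapping and SCM-based safe-LUT / errata register
 * writes. Kernels 5.4+ map the "memory-region" carveout and use the
 * qcom_scm_* interface; older kernels fall back to dma_alloc_coherent()
 * and the legacy scm_call2() path.
 */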
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;
	struct device_node *of_node;
	struct device_node *mem_node;
	struct resource res;

	of_node = (icp_fw->fw_dev)->of_node;
	mem_node = of_parse_phandle(of_node, "memory-region", 0);
	if (!mem_node) {
		rc = -ENOMEM;
		CAM_ERR(CAM_SMMU, "FW memory carveout not found");
		goto end;
	}

	rc = of_address_to_resource(mem_node, 0, &res);
	of_node_put(mem_node);
	if (rc < 0) {
		CAM_ERR(CAM_SMMU, "Unable to get start of FW mem carveout");
		goto end;
	}

	icp_fw->fw_hdl = res.start;
	icp_fw->fw_kva = ioremap_wc(icp_fw->fw_hdl, fw_length);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "Failed to map the FW.");
		rc = -ENOMEM;
		goto end;
	}
	memset_io(icp_fw->fw_kva, 0, fw_length);

end:
	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	iounmap(icp_fw->fw_kva);
}

int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version, rc = 0;

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && qcom_scm_smmu_notify_secure_lut(smmu_se_ife, safe_trigger)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}

void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	int reg_val;

	qcom_scm_io_readl(errata_wa->data.reg_info.offset, &reg_val);
	reg_val |= errata_wa->data.reg_info.value;
	qcom_scm_io_writel(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, const void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

static int camera_i2c_compare_dev(struct device *dev, const void *data)
{
	return i2c_bus_type.match(dev, (struct device_driver *) data);
}
#else
int cam_reserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	int rc = 0;

	icp_fw->fw_kva = dma_alloc_coherent(icp_fw->fw_dev, fw_length,
		&icp_fw->fw_hdl, GFP_KERNEL);
	if (!icp_fw->fw_kva) {
		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
		rc = -ENOMEM;
	}

	return rc;
}

void cam_unreserve_icp_fw(struct cam_fw_alloc_info *icp_fw, size_t fw_length)
{
	dma_free_coherent(icp_fw->fw_dev, fw_length, icp_fw->fw_kva,
		icp_fw->fw_hdl);
}

int cam_ife_notify_safe_lut_scm(bool safe_trigger)
{
	const uint32_t smmu_se_ife = 0;
	uint32_t camera_hw_version, rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = smmu_se_ife,
		.args[1] = safe_trigger,
	};

	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
	if (!rc && scm_call2(SCM_SIP_FNID(0x15, 0x3), &description)) {
		switch (camera_hw_version) {
		case CAM_CPAS_TITAN_170_V100:
		case CAM_CPAS_TITAN_170_V110:
		case CAM_CPAS_TITAN_175_V100:
			CAM_ERR(CAM_ISP, "scm call to enable safe failed");
			rc = -EINVAL;
			break;
		default:
			break;
		}
	}

	return rc;
}

void cam_cpastop_scm_write(struct cam_cpas_hw_errata_wa *errata_wa)
{
	int reg_val;

	reg_val = scm_io_read(errata_wa->data.reg_info.offset);
	reg_val |= errata_wa->data.reg_info.value;
	scm_io_write(errata_wa->data.reg_info.offset, reg_val);
}

static int camera_platform_compare_dev(struct device *dev, void *data)
{
	return platform_bus_type.match(dev, (struct device_driver *) data);
}

static int camera_i2c_compare_dev(struct device *dev, void *data)
{
	return i2c_bus_type.match(dev, (struct device_driver *) data);
}
#endif
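
/*
 * Sensitive buffers are zeroed before freeing; newer kernels rename
 * kzfree() to kfree_sensitive(), so both spellings are wrapped here.
 */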
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
void cam_free_clear(const void * ptr)
{
	kfree_sensitive(ptr);
}
#else
void cam_free_clear(const void * ptr)
{
	kzfree(ptr);
}
#endif
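
/*
 * cam_csiphy_notify_secure_mode() requests (or releases) lane protection for
 * a CSIPHY instance. On 6.0+ kernels this goes through the Mink/SMC-invoke
 * TrustedCameraDriver object; 5.4+ kernels use
 * qcom_scm_camera_protect_phy_lanes(); older kernels issue the raw
 * scm_call2() directly.
 */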
#if KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;
	struct Object client_env, sc_object;
	ITCDriverSensorInfo params = {0};
	struct cam_csiphy_secure_info *secure_info;

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		return -EINVAL;
	}

	rc = get_client_env_object(&client_env);
	if (rc) {
		CAM_ERR(CAM_CSIPHY, "Failed getting mink env object, rc: %d", rc);
		return rc;
	}

	rc = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object);
	if (rc) {
		CAM_ERR(CAM_CSIPHY, "Failed getting mink sc_object, rc: %d", rc);
		return rc;
	}

	secure_info = &csiphy_dev->csiphy_info[offset].secure_info;
	params.csid_hw_idx_mask = secure_info->csid_hw_idx_mask;
	params.cdm_hw_idx_mask = secure_info->cdm_hw_idx_mask;
	params.vc_mask = secure_info->vc_mask;
	params.phy_lane_sel_mask =
		csiphy_dev->csiphy_info[offset].csiphy_phy_lane_sel_mask;
	params.protect = protect ? 1 : 0;

	rc = ITrustedCameraDriver_dynamicProtectSensor(sc_object, &params);
	if (rc) {
		CAM_ERR(CAM_CSIPHY, "Mink secure call failed, rc: %d", rc);
		return rc;
	}

	rc = Object_release(sc_object);
	if (rc) {
		CAM_ERR(CAM_CSIPHY, "Failed releasing secure camera object, rc: %d", rc);
		return rc;
	}

	rc = Object_release(client_env);
	if (rc) {
		CAM_ERR(CAM_CSIPHY, "Failed releasing mink env object, rc: %d", rc);
		return rc;
	}

	return 0;
}
#elif KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;

	/*
	 * A check here is made if the target is using
	 * an older version of the kernel driver (< 6.0)
	 * with domain id feature present. In this case,
	 * we are to fail this call, as the new mink call
	 * is only supported on kernel driver versions 6.0
	 * and above, and the new domain id scheme is not
	 * backwards compatible with the older scheme.
	 */
	if (csiphy_dev->domain_id_security) {
		CAM_ERR(CAM_CSIPHY,
			"Domain id support not present on current kernel driver: %d",
			LINUX_VERSION_CODE);
		return -EINVAL;
	}

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (qcom_scm_camera_protect_phy_lanes(protect,
			csiphy_dev->csiphy_info[offset].csiphy_cpas_cp_reg_mask)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
#else
int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
	bool protect, int32_t offset)
{
	int rc = 0;
	struct scm_desc description = {
		.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL),
		.args[0] = protect,
		.args[1] = csiphy_dev->csiphy_info[offset]
			.csiphy_cpas_cp_reg_mask,
	};

	if (offset >= CSIPHY_MAX_INSTANCES_PER_PHY) {
		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
		rc = -EINVAL;
	} else if (scm_call2(SCM_SIP_FNID(0x18, 0x7), &description)) {
		CAM_ERR(CAM_CSIPHY, "SCM call to hypervisor failed");
		rc = -EINVAL;
	}

	return rc;
}
#endif
/* Callback to compare device from match list before adding as component */
static inline int camera_component_compare_dev(struct device *dev, void *data)
{
	return dev == data;
}

/* Add component matches to list for master of aggregate driver */
int camera_component_match_add_drivers(struct device *master_dev,
	struct component_match **match_list)
{
	int i, rc = 0;
	struct platform_device *pdev = NULL;
	struct i2c_client *client = NULL;
	struct device *start_dev = NULL, *match_dev = NULL;

	if (!master_dev || !match_list) {
		CAM_ERR(CAM_UTIL, "Invalid parameters for component match add");
		rc = -EINVAL;
		goto end;
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_platform_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_platform_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_platform_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&platform_bus_type,
				start_dev, drv_ptr, &camera_platform_compare_dev))) {
			put_device(start_dev);
			pdev = to_platform_device(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", pdev->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

	for (i = 0; i < ARRAY_SIZE(cam_component_i2c_drivers); i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
		struct device_driver const *drv =
			&cam_component_i2c_drivers[i]->driver;
		const void *drv_ptr = (const void *)drv;
#else
		struct device_driver *drv = &cam_component_i2c_drivers[i]->driver;
		void *drv_ptr = (void *)drv;
#endif
		start_dev = NULL;
		while ((match_dev = bus_find_device(&i2c_bus_type,
				start_dev, drv_ptr, &camera_i2c_compare_dev))) {
			put_device(start_dev);
			client = to_i2c_client(match_dev);
			CAM_DBG(CAM_UTIL, "Adding matched component:%s", client->name);
			component_match_add(master_dev, match_list,
				camera_component_compare_dev, match_dev);
			start_dev = match_dev;
		}
		put_device(start_dev);
	}

end:
	return rc;
}
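
/*
 * Pull bus/peripheral/master IDs out of an SMMU fault so page-fault logs can
 * identify the offending camera block; the accessor differs between the
 * qcom-iommu-util API (5.10+) and the older downstream iommu_get_fault_ids().
 */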
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#include <linux/qcom-iommu-util.h>
void cam_check_iommu_faults(struct iommu_domain *domain,
	struct cam_smmu_pf_info *pf_info)
{
	struct qcom_iommu_fault_ids fault_ids = {0, 0, 0};

	if (qcom_iommu_get_fault_ids(domain, &fault_ids))
		CAM_ERR(CAM_SMMU, "Cannot get smmu fault ids");
	else
		CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
			fault_ids.bid, fault_ids.pid, fault_ids.mid);

	pf_info->bid = fault_ids.bid;
	pf_info->pid = fault_ids.pid;
	pf_info->mid = fault_ids.mid;
}
#else
void cam_check_iommu_faults(struct iommu_domain *domain,
	struct cam_smmu_pf_info *pf_info)
{
	struct iommu_fault_ids fault_ids = {0, 0, 0};

	if (iommu_get_fault_ids(domain, &fault_ids))
		CAM_ERR(CAM_SMMU, "Error: Can not get smmu fault ids");

	CAM_ERR(CAM_SMMU, "smmu fault ids bid:%d pid:%d mid:%d",
		fault_ids.bid, fault_ids.pid, fault_ids.mid);

	pf_info->bid = fault_ids.bid;
	pf_info->pid = fault_ids.pid;
	pf_info->mid = fault_ids.mid;
}
#endif
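
/* Orders subdevices by close_seq_prior so they can be shut down in sequence */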
static inline int cam_subdev_list_cmp(struct cam_subdev *entry_1, struct cam_subdev *entry_2)
{
	if (entry_1->close_seq_prior > entry_2->close_seq_prior)
		return 1;
	else if (entry_1->close_seq_prior < entry_2->close_seq_prior)
		return -1;
	else
		return 0;
}
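
/*
 * dma-buf kernel mappings: 5.18+ passes a struct iosys_map, 5.15 uses the
 * older struct dma_buf_map spelling, and earlier kernels return the vaddr
 * directly from dma_buf_vmap().
 */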
#if (KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE)
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	struct iosys_map mapping;
	int error_code = dma_buf_vmap(dmabuf, &mapping);

	if (error_code) {
		*vaddr = 0;
	} else {
		*vaddr = (mapping.is_iomem) ?
			(uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
		CAM_DBG(CAM_MEM,
			"dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
			dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
	}

	return error_code;
}

void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	struct iosys_map mapping = IOSYS_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dmabuf, &mapping);
}
#elif (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	struct dma_buf_map mapping;
	int error_code = dma_buf_vmap(dmabuf, &mapping);

	if (error_code) {
		*vaddr = 0;
	} else {
		*vaddr = (mapping.is_iomem) ?
			(uintptr_t)mapping.vaddr_iomem : (uintptr_t)mapping.vaddr;
		CAM_DBG(CAM_MEM,
			"dmabuf=%p, *vaddr=%p, is_iomem=%d, vaddr_iomem=%p, vaddr=%p",
			dmabuf, *vaddr, mapping.is_iomem, mapping.vaddr_iomem, mapping.vaddr);
	}

	return error_code;
}

void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	struct dma_buf_map mapping = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dmabuf, &mapping);
}
#else
int cam_compat_util_get_dmabuf_va(struct dma_buf *dmabuf, uintptr_t *vaddr)
{
	int error_code = 0;
	void *addr = dma_buf_vmap(dmabuf);

	if (!addr) {
		*vaddr = 0;
		error_code = -ENOSPC;
	} else {
		*vaddr = (uintptr_t)addr;
	}

	return error_code;
}

void cam_compat_util_put_dmabuf_va(struct dma_buf *dmabuf, void *vaddr)
{
	dma_buf_vunmap(dmabuf, vaddr);
}
#endif
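
/*
 * Helpers whose prototypes changed around 5.15: the downstream best-fit IOVA
 * hooks are gone (so the custom SMMU setup becomes a no-op), the ordered-list
 * compare callback takes const list heads, and the I3C remove callback no
 * longer returns a value.
 */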
#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
void cam_smmu_util_iommu_custom(struct device *dev,
	dma_addr_t discard_start, size_t discard_length)
{
}

int cam_req_mgr_ordered_list_cmp(void *priv,
	const struct list_head *head_1, const struct list_head *head_2)
{
	return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
		list_entry(head_2, struct cam_subdev, list));
}

void cam_i3c_driver_remove(struct i3c_device *client)
{
	CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
		(client ? dev_name(&client->dev) : "none"));
}
#else
void cam_smmu_util_iommu_custom(struct device *dev,
	dma_addr_t discard_start, size_t discard_length)
{
	iommu_dma_enable_best_fit_algo(dev);

	if (discard_start)
		iommu_dma_reserve_iova(dev, discard_start, discard_length);

	return;
}

int cam_req_mgr_ordered_list_cmp(void *priv,
	struct list_head *head_1, struct list_head *head_2)
{
	return cam_subdev_list_cmp(list_entry(head_1, struct cam_subdev, list),
		list_entry(head_2, struct cam_subdev, list));
}

int cam_i3c_driver_remove(struct i3c_device *client)
{
	CAM_DBG(CAM_SENSOR, "I3C remove invoked for %s",
		(client ? dev_name(&client->dev) : "none"));

	return 0;
}
#endif
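
/*
 * dma_buf_set_name() is only routed to the kernel on the 5.15..5.17 range
 * this driver uses it on; every other version treats naming as a no-op.
 */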
#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE && \
	KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE)
long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	long ret = 0;

	ret = dma_buf_set_name(dmabuf, name);

	return ret;
}
#else
long cam_dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	return 0;
}
#endif
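
/*
 * On 5.18+ the SPI driver remove callback returns void and IRQs are looked
 * up with platform_get_irq(); older kernels return an int from remove and
 * still expose IRQs as named IORESOURCE_IRQ resources.
 */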
#if KERNEL_VERSION(5, 18, 0) <= LINUX_VERSION_CODE
void cam_eeprom_spi_driver_remove(struct spi_device *sdev)
{
	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
	struct cam_eeprom_ctrl_t *e_ctrl;
	struct cam_eeprom_soc_private *soc_private;
	struct cam_hw_soc_info *soc_info;

	if (!sd) {
		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
		return;
	}

	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
	if (!e_ctrl) {
		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
		return;
	}

	soc_info = &e_ctrl->soc_info;
	mutex_lock(&(e_ctrl->eeprom_mutex));
	cam_eeprom_shutdown(e_ctrl);
	mutex_unlock(&(e_ctrl->eeprom_mutex));
	mutex_destroy(&(e_ctrl->eeprom_mutex));
	cam_unregister_subdev(&(e_ctrl->v4l2_dev_str));
	kfree(e_ctrl->io_master_info.spi_client);
	e_ctrl->io_master_info.spi_client = NULL;
	soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	if (soc_private) {
		kfree(soc_private->power_info.gpio_num_info);
		soc_private->power_info.gpio_num_info = NULL;
		kfree(soc_private);
		soc_private = NULL;
	}
	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
	kfree(e_ctrl);
}

int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info)
{
	int rc = 0;

	soc_info->irq_num = platform_get_irq(soc_info->pdev, 0);
	if (soc_info->irq_num < 0) {
		rc = soc_info->irq_num;
		return rc;
	}

	return rc;
}
#else
int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
{
	struct v4l2_subdev *sd = spi_get_drvdata(sdev);
	struct cam_eeprom_ctrl_t *e_ctrl;
	struct cam_eeprom_soc_private *soc_private;
	struct cam_hw_soc_info *soc_info;

	if (!sd) {
		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
		return -EINVAL;
	}

	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
	if (!e_ctrl) {
		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
		return -EINVAL;
	}

	soc_info = &e_ctrl->soc_info;
	mutex_lock(&(e_ctrl->eeprom_mutex));
	cam_eeprom_shutdown(e_ctrl);
	mutex_unlock(&(e_ctrl->eeprom_mutex));
	mutex_destroy(&(e_ctrl->eeprom_mutex));
	cam_unregister_subdev(&(e_ctrl->v4l2_dev_str));
	kfree(e_ctrl->io_master_info.spi_client);
	e_ctrl->io_master_info.spi_client = NULL;
	soc_private =
		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
	if (soc_private) {
		kfree(soc_private->power_info.gpio_num_info);
		soc_private->power_info.gpio_num_info = NULL;
		kfree(soc_private);
		soc_private = NULL;
	}
	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
	kfree(e_ctrl);

	return 0;
}

int cam_compat_util_get_irq(struct cam_hw_soc_info *soc_info)
{
	int rc = 0;

	soc_info->irq_line =
		platform_get_resource_byname(soc_info->pdev,
			IORESOURCE_IRQ, soc_info->irq_name);
	if (!soc_info->irq_line) {
		rc = -ENODEV;
		return rc;
	}

	soc_info->irq_num = soc_info->irq_line->start;

	return rc;
}
#endif