Merge 8eb1e1e993b0a50cdc5b209fd7084a73cac3a873 on remote branch

Change-Id: I87337423789f46b96405d5df9692e10e1428343e
Linux Build Service Account committed 2 years ago
commit 00e60b6d27
13 changed files with 252 additions and 84 deletions
  1. Android.mk (+2 -0)
  2. Kbuild (+8 -0)
  3. cnss2/Kconfig (+9 -0)
  4. cnss2/genl.c (+6 -3)
  5. cnss2/main.c (+80 -11)
  6. cnss2/main.h (+5 -0)
  7. cnss2/pci.c (+45 -10)
  8. cnss2/pci.h (+4 -0)
  9. cnss2/pci_platform.h (+11 -4)
  10. cnss2/pci_qcom.c (+7 -6)
  11. cnss_prealloc/cnss_prealloc.c (+50 -46)
  12. icnss2/main.c (+20 -2)
  13. inc/cnss_prealloc.h (+5 -2)

+ 2 - 0
Android.mk

@@ -31,7 +31,9 @@ CNSS_SRC_FILES := \
 	$(wildcard $(LOCAL_PATH)/*) \
 	$(wildcard $(LOCAL_PATH)/*/*) \
 
+ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), true)
 KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS=$(PWD)/$(call intermediates-dir-for,DLKM,sec-module-symvers)/Module.symvers
+endif
 
 # Module.symvers needs to be generated as a intermediate module so that
 # other modules which depend on WLAN platform modules can set local

+ 8 - 0
Kbuild

@@ -52,6 +52,14 @@ ifeq ($(CONFIG_CNSS2_SSR_DRIVER_DUMP),y)
 KBUILD_CPPFLAGS += -DCONFIG_CNSS2_SSR_DRIVER_DUMP
 endif
 
+ifeq ($(CONFIG_FREE_M3_BLOB_MEM),y)
+KBUILD_CPPFLAGS += -DCONFIG_FREE_M3_BLOB_MEM
+endif
+
+ifeq ($(CONFIG_DISABLE_CNSS_SRAM_DUMP),y)
+KBUILD_CPPFLAGS += -DCONFIG_DISABLE_CNSS_SRAM_DUMP
+endif
+
 obj-$(CONFIG_CNSS2) += cnss2/
 obj-$(CONFIG_ICNSS2) += icnss2/
 obj-$(CONFIG_CNSS_GENL) += cnss_genl/

+ 9 - 0
cnss2/Kconfig

@@ -117,3 +117,12 @@ config CNSS2_CONDITIONAL_POWEROFF
 	  for the first time to avoid potential subsequent failures during
 	  device re-probe(after wlan function driver loaded) under very bad
 	  thermal conditions.
+
+config DISABLE_CNSS_SRAM_DUMP
+	bool "Disable sram_dump"
+	depends on CNSS2
+	depends on CNSS2_DEBUG
+	help
+	  If enabled, the CNSS platform driver will not dump SRAM on MHI
+	  power on timeout (QCA6490 chipset only), saving the 4M of memory
+	  that the SRAM dump buffer would otherwise cost.

+ 6 - 3
cnss2/genl.c

@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
 
 #define pr_fmt(fmt) "cnss_genl: " fmt
 
@@ -92,7 +95,7 @@ static int cnss_genl_send_data(u8 type, char *file_name, u32 total_size,
 	int ret = 0;
 	char filename[CNSS_GENL_STR_LEN_MAX + 1];
 
-	cnss_pr_dbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n",
+	cnss_pr_vdbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n",
 		    type, file_name, total_size, seg_id, end, data_len);
 
 	if (!file_name)
@@ -164,7 +167,7 @@ int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
 	u8 end = 0;
 	u8 retry;
 
-	cnss_pr_dbg("type: %u, total_size: %x\n", type, total_size);
+	cnss_pr_vdbg("type: %u, total_size: %x\n", type, total_size);
 
 	while (remaining) {
 		if (remaining > CNSS_GENL_DATA_LEN_MAX) {

+ 80 - 11
cnss2/main.c

@@ -150,6 +150,20 @@ struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
 	return NULL;
 }
 
+struct cnss_plat_data *cnss_get_first_plat_priv(struct platform_device
+						 *plat_dev)
+{
+	int i;
+
+	if (!plat_dev) {
+		for (i = 0; i < plat_env_count; i++) {
+			if (plat_env[i])
+				return plat_env[i];
+		}
+	}
+	return NULL;
+}
+
 static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv)
 {
 	cnss_pr_dbg("Clear plat_priv at %d", plat_priv->plat_idx);
@@ -274,6 +288,12 @@ cnss_get_pld_bus_ops_name(struct cnss_plat_data *plat_priv)
 }
 #endif
 
+void cnss_get_bwscal_info(struct cnss_plat_data *plat_priv)
+{
+	plat_priv->no_bwscale = of_property_read_bool(plat_priv->dev_node,
+						      "qcom,no-bwscale");
+}
+
 static inline int
 cnss_get_rc_num(struct cnss_plat_data *plat_priv)
 {
@@ -3049,8 +3069,11 @@ int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
 		}
 
 		seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
-		if (!seg)
+		if (!seg) {
+			cnss_pr_err("%s: Failed to allocate mem for seg %d\n",
+				    __func__, i);
 			continue;
+		}
 
 		if (meta_info.entry[dump_seg->type].entry_start == 0) {
 			meta_info.entry[dump_seg->type].type = dump_seg->type;
@@ -3065,8 +3088,11 @@ int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
 	}
 
 	seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
-	if (!seg)
-		goto do_elf_dump;
+	if (!seg) {
+		cnss_pr_err("%s: Failed to allocate mem for elf ramdump seg\n",
+			    __func__);
+		goto skip_elf_dump;
+	}
 
 	meta_info.magic = CNSS_RAMDUMP_MAGIC;
 	meta_info.version = CNSS_RAMDUMP_VERSION;
@@ -3076,9 +3102,9 @@ int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
 	seg->size = sizeof(meta_info);
 	list_add(&seg->node, &head);
 
-do_elf_dump:
 	ret = qcom_elf_dump(&head, info_v2->ramdump_dev, ELF_CLASS);
 
+skip_elf_dump:
 	while (!list_empty(&head)) {
 		seg = list_first_entry(&head, struct qcom_dump_segment, node);
 		list_del(&seg->node);
@@ -3119,8 +3145,9 @@ int cnss_do_host_ramdump(struct cnss_plat_data *plat_priv,
 		[CNSS_HOST_WMI_EVENT_LOG_IDX] = "wmi_event_log_idx",
 		[CNSS_HOST_WMI_RX_EVENT_IDX] = "wmi_rx_event_idx"
 	};
-	int i, j;
+	int i;
 	int ret = 0;
+	enum cnss_host_dump_type j;
 
 	if (!dump_enabled()) {
 		cnss_pr_info("Dump collection is not enabled\n");
@@ -3153,7 +3180,7 @@ int cnss_do_host_ramdump(struct cnss_plat_data *plat_priv,
 		seg->da = (dma_addr_t)ssr_entry[i].buffer_pointer;
 		seg->size = ssr_entry[i].buffer_size;
 
-		for (j = 0; j < ARRAY_SIZE(wlan_str); j++) {
+		for (j = 0; j < CNSS_HOST_DUMP_TYPE_MAX; j++) {
 			if (strncmp(ssr_entry[i].region_name, wlan_str[j],
 				    strlen(wlan_str[j])) == 0) {
 				meta_info.entry[i].type = j;
@@ -3166,6 +3193,13 @@ int cnss_do_host_ramdump(struct cnss_plat_data *plat_priv,
 	}
 
 	seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
+
+	if (!seg) {
+		cnss_pr_err("%s: Failed to allocate mem for host dump seg\n",
+			    __func__);
+		goto skip_host_dump;
+	}
+
 	meta_info.magic = CNSS_RAMDUMP_MAGIC;
 	meta_info.version = CNSS_RAMDUMP_VERSION;
 	meta_info.chipset = plat_priv->device_id;
@@ -3174,7 +3208,10 @@ int cnss_do_host_ramdump(struct cnss_plat_data *plat_priv,
 	seg->da = (dma_addr_t)&meta_info;
 	seg->size = sizeof(meta_info);
 	list_add(&seg->node, &head);
+
 	ret = qcom_elf_dump(&head, new_device, ELF_CLASS);
+
+skip_host_dump:
 	while (!list_empty(&head)) {
 		seg = list_first_entry(&head, struct qcom_dump_segment, node);
 		list_del(&seg->node);
@@ -4218,6 +4255,19 @@ int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
 }
 #endif
 
+#ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
+static void cnss_sram_dump_init(struct cnss_plat_data *plat_priv)
+{
+}
+#else
+static void cnss_sram_dump_init(struct cnss_plat_data *plat_priv)
+{
+	if (plat_priv->device_id == QCA6490_DEVICE_ID &&
+	    cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
+		plat_priv->sram_dump = kcalloc(SRAM_DUMP_SIZE, 1, GFP_KERNEL);
+}
+#endif
+
 static int cnss_misc_init(struct cnss_plat_data *plat_priv)
 {
 	int ret;
@@ -4261,9 +4311,7 @@ static int cnss_misc_init(struct cnss_plat_data *plat_priv)
 		cnss_pr_err("QMI IPC connection call back register failed, err = %d\n",
 			    ret);
 
-	if (plat_priv->device_id == QCA6490_DEVICE_ID &&
-	    cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
-		plat_priv->sram_dump = kcalloc(SRAM_DUMP_SIZE, 1, GFP_KERNEL);
+	cnss_sram_dump_init(plat_priv);
 
 	if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
 				  "qcom,rc-ep-short-channel"))
@@ -4272,6 +4320,19 @@ static int cnss_misc_init(struct cnss_plat_data *plat_priv)
 	return 0;
 }
 
+#ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
+static void cnss_sram_dump_deinit(struct cnss_plat_data *plat_priv)
+{
+}
+#else
+static void cnss_sram_dump_deinit(struct cnss_plat_data *plat_priv)
+{
+	if (plat_priv->device_id == QCA6490_DEVICE_ID &&
+	    cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
+		kfree(plat_priv->sram_dump);
+}
+#endif
+
 static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
 {
 	cnss_plat_ipc_unregister(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
@@ -4286,7 +4347,7 @@ static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
 	del_timer(&plat_priv->fw_boot_timer);
 	wakeup_source_unregister(plat_priv->recovery_ws);
 	cnss_deinit_sol_gpio(plat_priv);
-	kfree(plat_priv->sram_dump);
+	cnss_sram_dump_deinit(plat_priv);
 	kfree(plat_priv->on_chip_pmic_board_ids);
 }
 
@@ -4504,9 +4565,17 @@ end:
 
 int cnss_wlan_hw_enable(void)
 {
-	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
+	struct cnss_plat_data *plat_priv;
 	int ret = 0;
 
+	if (cnss_is_dual_wlan_enabled())
+		plat_priv = cnss_get_first_plat_priv(NULL);
+	else
+		plat_priv = cnss_get_plat_priv(NULL);
+
+	if (!plat_priv)
+		return -ENODEV;
+
 	clear_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state);
 
 	if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
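
The ramdump hunks above rename the do_elf_dump label to skip_elf_dump (and add skip_host_dump) so that a failed meta-info allocation bypasses qcom_elf_dump() but still reaches the loop that frees every segment already queued. A condensed sketch of that shape, simplified from the diff and not a verbatim excerpt (the error-code choice and surrounding context are assumptions for illustration):

#include <linux/list.h>
#include <linux/slab.h>

static int example_elf_dump(struct list_head *head, void *ramdump_dev)
{
	struct qcom_dump_segment *seg;
	int ret = 0;

	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg) {
		ret = -ENOMEM;		/* assumed; the diff leaves ret untouched here */
		goto skip_elf_dump;	/* skip the dump, but still clean up */
	}
	list_add(&seg->node, head);

	ret = qcom_elf_dump(head, ramdump_dev, ELF_CLASS);

skip_elf_dump:
	while (!list_empty(head)) {
		seg = list_first_entry(head, struct qcom_dump_segment, node);
		list_del(&seg->node);
		kfree(seg);
	}
	return ret;
}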

+ 5 - 0
cnss2/main.h

@@ -555,7 +555,9 @@ struct cnss_plat_data {
 	u8 use_fw_path_with_prefix;
 	char firmware_name[MAX_FIRMWARE_NAME_LEN];
 	char fw_fallback_name[MAX_FIRMWARE_NAME_LEN];
+#ifndef CONFIG_DISABLE_CNSS_SRAM_DUMP
 	u8 *sram_dump;
+#endif
 	struct completion rddm_complete;
 	struct completion recovery_complete;
 	struct cnss_control_params ctrl_params;
@@ -607,6 +609,7 @@ struct cnss_plat_data {
 	const char *pld_bus_ops_name;
 	u32 on_chip_pmic_devices_count;
 	u32 *on_chip_pmic_board_ids;
+	bool no_bwscale;
 };
 
 #if IS_ENABLED(CONFIG_ARCH_QCOM)
@@ -632,12 +635,14 @@ static inline u64 cnss_get_host_timestamp(struct cnss_plat_data *plat_priv)
 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv);
 int cnss_wlan_hw_enable(void);
 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
+struct cnss_plat_data *cnss_get_first_plat_priv(struct platform_device *plat_dev);
 void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv);
 void cnss_pm_relax(struct cnss_plat_data *plat_priv);
 struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num);
 int cnss_get_plat_env_count(void);
 struct cnss_plat_data *cnss_get_plat_env(int index);
 void cnss_get_qrtr_info(struct cnss_plat_data *plat_priv);
+void cnss_get_bwscal_info(struct cnss_plat_data *plat_priv);
 bool cnss_is_dual_wlan_enabled(void);
 int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
 			   enum cnss_driver_event_type type,

+ 45 - 10
cnss2/pci.c

@@ -35,7 +35,7 @@
 
 #define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
 #define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
-#define PCI_DMA_MASK_64_BIT		~0ULL
+#define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)
 
 #define MHI_NODE_NAME			"qcom,mhi"
 #define MHI_MSI_NAME			"MHI"
@@ -1611,6 +1611,7 @@ EXPORT_SYMBOL(cnss_get_pci_slot);
  */
 static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
 {
+	enum mhi_ee_type ee;
 	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
 	u32 pbl_log_sram_start;
 	u32 pbl_stage, sbl_log_start, sbl_log_size;
@@ -1667,6 +1668,12 @@ static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
 		    pbl_wlan_boot_cfg, pbl_bootstrap_status);
 
+	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
+	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
+		cnss_pr_dbg("Avoid Dumping PBL log data in Mission mode\n");
+		return;
+	}
+
 	cnss_pr_dbg("Dumping PBL log data\n");
 	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
 		mem_addr = pbl_log_sram_start + i;
@@ -1684,6 +1691,12 @@ static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
 		return;
 	}
 
+	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
+	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
+		cnss_pr_dbg("Avoid Dumping SBL log data in Mission mode\n");
+		return;
+	}
+
 	cnss_pr_dbg("Dumping SBL log data\n");
 	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
 		mem_addr = sbl_log_start + i;
@@ -1693,6 +1706,11 @@ static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
 	}
 }
 
+#ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
+static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
+{
+}
+#else
 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
 {
 	struct cnss_plat_data *plat_priv;
@@ -1727,6 +1745,7 @@ static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
 			cond_resched();
 	}
 }
+#endif
 
 static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
 {
@@ -3620,10 +3639,12 @@ static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
 	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 
 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
 
-	if (driver_ops && driver_ops->suspend) {
+	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
+	    driver_ops && driver_ops->suspend) {
 		ret = driver_ops->suspend(pci_dev, state);
 		if (ret) {
 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
@@ -3640,8 +3661,10 @@ static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
 	struct pci_dev *pci_dev = pci_priv->pci_dev;
 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
 	int ret = 0;
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 
-	if (driver_ops && driver_ops->resume) {
+	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
+	    driver_ops && driver_ops->resume) {
 		ret = driver_ops->resume(pci_dev);
 		if (ret)
 			cnss_pr_err("Failed to resume host driver, err = %d\n",
@@ -3849,6 +3872,7 @@ static int cnss_pci_suspend_noirq(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
 	struct cnss_wlan_driver *driver_ops;
+	struct cnss_plat_data *plat_priv;
 
 	if (!pci_priv)
 		goto out;
@@ -3857,7 +3881,9 @@ static int cnss_pci_suspend_noirq(struct device *dev)
 		goto out;
 
 	driver_ops = pci_priv->driver_ops;
-	if (driver_ops && driver_ops->suspend_noirq)
+	plat_priv = pci_priv->plat_priv;
+	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
+	    driver_ops && driver_ops->suspend_noirq)
 		ret = driver_ops->suspend_noirq(pci_dev);
 
 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
@@ -3874,6 +3900,7 @@ static int cnss_pci_resume_noirq(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
 	struct cnss_wlan_driver *driver_ops;
+	struct cnss_plat_data *plat_priv;
 
 	if (!pci_priv)
 		goto out;
@@ -3881,8 +3908,10 @@ static int cnss_pci_resume_noirq(struct device *dev)
 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
 		goto out;
 
+	plat_priv = pci_priv->plat_priv;
 	driver_ops = pci_priv->driver_ops;
-	if (driver_ops && driver_ops->resume_noirq &&
+	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
+	    driver_ops && driver_ops->resume_noirq &&
 	    !pci_priv->pci_link_down_ind)
 		ret = driver_ops->resume_noirq(pci_dev);
 
@@ -5187,6 +5216,7 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
 
 	switch (device_id) {
 	case QCA6174_DEVICE_ID:
+	case QCN7605_DEVICE_ID:
 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
 		break;
 	case QCA6390_DEVICE_ID:
@@ -5196,9 +5226,6 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
 	case PEACH_DEVICE_ID:
 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
 		break;
-	case QCN7605_DEVICE_ID:
-		pci_priv->dma_bit_mask = PCI_DMA_MASK_64_BIT;
-		break;
 	default:
 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
 		break;
@@ -6359,11 +6386,16 @@ static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 	}
 
 	/* MHI satellite driver only needs to connect when DRV is supported */
-	if (cnss_pci_is_drv_supported(pci_priv))
+	if (cnss_pci_get_drv_supported(pci_priv))
 		cnss_mhi_controller_set_base(pci_priv, bar_start);
 
+	cnss_get_bwscal_info(plat_priv);
+	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);
+
 	/* BW scale CB needs to be set after registering MHI per requirement */
-	cnss_mhi_controller_set_bw_scale_cb(pci_priv, cnss_mhi_bw_scale);
+	if (!plat_priv->no_bwscale)
+		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
+						    cnss_mhi_bw_scale);
 
 	ret = cnss_pci_update_fw_name(pci_priv);
 	if (ret)
@@ -6748,6 +6780,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
 	if (ret)
 		goto unregister_ramdump;
 
+	/* update drv support flag */
+	cnss_pci_update_drv_supported(pci_priv);
+
 	ret = cnss_reg_pci_event(pci_priv);
 	if (ret) {
 		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
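
The QCN7605 case now falls back to the 32-bit DMA mask, and PCI_DMA_MASK_64_BIT is spelled DMA_BIT_MASK(64) (numerically identical to ~0ULL, just clearer). For context, a hedged sketch of how a mask chosen in cnss_pci_enable_bus() is typically applied; the actual call site is outside this diff, and dma_set_mask_and_coherent() is the standard kernel API rather than something this change introduces:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_apply_dma_mask(struct pci_dev *pci_dev, u64 dma_bit_mask)
{
	int ret;

	/* Constrain both streaming and coherent DMA to the chosen mask */
	ret = dma_set_mask_and_coherent(&pci_dev->dev, dma_bit_mask);
	if (ret)
		dev_err(&pci_dev->dev, "Failed to set DMA mask 0x%llx: %d\n",
			dma_bit_mask, ret);

	return ret;
}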

+ 4 - 0
cnss2/pci.h

@@ -33,6 +33,10 @@
 #define LINK_TRAINING_RETRY_DELAY_MS		500
 #define MSI_USERS			4
 
+#define CNSS_MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || \
+				      ee == MHI_EE_WFW || \
+				      ee == MHI_EE_FP)
+
 enum cnss_mhi_state {
 	CNSS_MHI_INIT,
 	CNSS_MHI_DEINIT,

+ 11 - 4
cnss2/pci_platform.h

@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
 
 #ifndef _CNSS_PCI_PLATFORM_H
 #define _CNSS_PCI_PLATFORM_H
@@ -112,7 +112,8 @@ bool cnss_pci_is_one_msi(struct cnss_pci_data *pci_priv);
 int cnss_pci_get_one_msi_mhi_irq_array_size(struct cnss_pci_data *pci_priv);
 bool cnss_pci_is_force_one_msi(struct cnss_pci_data *pci_priv);
 int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv);
-bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv);
+void cnss_pci_update_drv_supported(struct cnss_pci_data *pci_priv);
+
 /**
  * _cnss_pci_get_reg_dump() - Dump PCIe RC registers for debug
  * @pci_priv: driver PCI bus context pointer
@@ -198,12 +199,18 @@ int _cnss_pci_get_reg_dump(struct cnss_pci_data *pci_priv,
 	return 0;
 }
 
-bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
+void cnss_pci_update_drv_supported(struct cnss_pci_data *pci_priv)
 {
-	return false;
+	pci_priv->drv_supported = false;
 }
+
 #endif /* CONFIG_PCI_MSM */
 
+static inline bool cnss_pci_get_drv_supported(struct cnss_pci_data *pci_priv)
+{
+	return pci_priv->drv_supported;
+}
+
 #if IS_ENABLED(CONFIG_ARCH_QCOM)
 int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv);
 int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv);
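
This header splits the old cnss_pci_is_drv_supported() into cnss_pci_update_drv_supported(), run once, and the inline cnss_pci_get_drv_supported() accessor that reads the cached pci_priv->drv_supported flag. A minimal sketch of the intended call order (illustrative only; unrelated probe steps and error handling are omitted):

static int example_probe_step(struct cnss_pci_data *pci_priv)
{
	/* Evaluate DRV support once, at probe time; the result is cached
	 * in pci_priv->drv_supported by cnss_pci_update_drv_supported().
	 */
	cnss_pci_update_drv_supported(pci_priv);

	/* Later paths (PCI event registration, MHI setup, ADSP PC control)
	 * read the cached flag instead of re-walking the root-port DT node.
	 */
	if (cnss_pci_get_drv_supported(pci_priv))
		cnss_pr_dbg("PCIe DRV is supported on this platform\n");

	return 0;
}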

+ 7 - 6
cnss2/pci_qcom.c

@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
 
 #include "pci_platform.h"
 #include "debug.h"
@@ -159,7 +159,7 @@ static int cnss_pci_set_link_down(struct cnss_pci_data *pci_priv)
 	return ret;
 }
 
-bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
+void cnss_pci_update_drv_supported(struct cnss_pci_data *pci_priv)
 {
 	struct pci_dev *root_port = pcie_find_root_port(pci_priv->pci_dev);
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -169,7 +169,7 @@ bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
 	if (!root_port) {
 		cnss_pr_err("PCIe DRV is not supported as root port is null\n");
 		pci_priv->drv_supported = false;
-		return drv_supported;
+		return;
 	}
 
 	root_of_node = root_port->dev.of_node;
@@ -189,8 +189,6 @@ bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
 		plat_priv->cap.cap_flag |= CNSS_HAS_DRV_SUPPORT;
 		cnss_set_feature_list(plat_priv, CNSS_DRV_SUPPORT_V01);
 	}
-
-	return drv_supported;
 }
 
 static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
@@ -268,7 +266,7 @@ int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
 			    MSM_PCIE_EVENT_LINKDOWN |
 			    MSM_PCIE_EVENT_WAKEUP;
 
-	if (cnss_pci_is_drv_supported(pci_priv))
+	if (cnss_pci_get_drv_supported(pci_priv))
 		pci_event->events = pci_event->events |
 			MSM_PCIE_EVENT_DRV_CONNECT |
 			MSM_PCIE_EVENT_DRV_DISCONNECT;
@@ -299,6 +297,9 @@ int cnss_wlan_adsp_pc_enable(struct cnss_pci_data *pci_priv,
 	u32 pm_options = PM_OPTIONS_DEFAULT;
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 
+	if (!cnss_pci_get_drv_supported(pci_priv))
+		return 0;
+
 	if (plat_priv->adsp_pc_enabled == control) {
 		cnss_pr_dbg("ADSP power collapse already %s\n",
 			    control ? "Enabled" : "Disabled");

+ 50 - 46
cnss_prealloc/cnss_prealloc.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2012,2014-2017,2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -16,6 +16,17 @@
 #else
 #include <net/cnss_prealloc.h>
 #endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
+/* Ideally header should be from standard include path. So this is not an
+ * ideal way of header inclusion but use of slab struct to derive cache
+ * from a mem ptr helps in avoiding additional tracking and/or adding headroom
+ * of 8 bytes for cache in the beginning of buffer and wasting extra memory,
+ * particularly in the case when size of memory requested falls around the edge
+ * of a page boundary. We also have the precedent of minidump_memory.c which
+ * includes mm/slab.h using this style.
+ */
+#include "../mm/slab.h"
+#endif
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("CNSS prealloc driver");
@@ -146,64 +157,58 @@ static void cnss_pool_deinit(void)
 	}
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
-/* In kernel 5.17, slab_cache is removed from page struct, so
- * store cache in the beginning of memory buffer.
+/**
+ * cnss_pool_get_index() - Get the index of memory pool
+ * @mem: Allocated memory
+ *
+ * Returns the index of the memory pool which fits the requested memory. The
+ * complexity of this check is O(num of memory pools). Returns a negative
+ * value with error code in case of failure.
+ *
  */
-static inline void cnss_pool_put_cache_in_mem(void *mem, struct kmem_cache *cache)
-{
-	/* put cache at the beginnging of mem */
-	(*(struct kmem_cache **)mem) = cache;
-}
-
-static inline struct kmem_cache *cnss_pool_get_cache_from_mem(void *mem)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
+static int cnss_pool_get_index(void *mem)
 {
+	struct slab *slab;
 	struct kmem_cache *cache;
+	int i;
+
+	if (!virt_addr_valid(mem))
+		return -EINVAL;
+
+	/* mem -> slab -> cache */
+	slab = virt_to_slab(mem);
+	if (!slab)
+		return -ENOENT;
 
-	/* read cache from the beginnging of mem */
-	cache = (struct kmem_cache *)(*(struct kmem_cache **)mem);
+	cache = slab->slab_cache;
+	if (!cache)
+		return -ENOENT;
 
-	return cache;
+	/* Check if memory belongs to a pool */
+	for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
+		if (cnss_pools[i].cache == cache)
+			return i;
+	}
+
+	return -ENOENT;
 }
 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
-/* for older kernel < 5.17, we use page->slab_cache. In such case
- * we do not reserve headroom in memory buffer to store cache.
- */
-static inline void cnss_pool_put_cache_in_mem(void *mem, struct kmem_cache *cache)
-{
-}
-
-static inline struct kmem_cache *cnss_pool_get_cache_from_mem(void *mem)
+static int cnss_pool_get_index(void *mem)
 {
 	struct page *page;
+	struct kmem_cache *cache;
+	int i;
 
 	if (!virt_addr_valid(mem))
-		return NULL;
+		return -EINVAL;
 
 	/* mem -> page -> cache */
 	page = virt_to_head_page(mem);
 	if (!page)
-		return NULL;
-
-	return page->slab_cache;
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
-
-/**
- * cnss_pool_get_index() - Get the index of memory pool
- * @mem: Allocated memory
- *
- * Returns the index of the memory pool which fits the reqested memory. The
- * complexity of this check is O(num of memory pools). Returns a negative
- * value with error code in case of failure.
- *
- */
-static int cnss_pool_get_index(void *mem)
-{
-	struct kmem_cache *cache;
-	int i;
+		return -ENOENT;
 
-	cache = cnss_pool_get_cache_from_mem(mem);
+	cache = page->slab_cache;
 	if (!cache)
 		return -ENOENT;
 
@@ -215,6 +220,7 @@ static int cnss_pool_get_index(void *mem)
 
 	return -ENOENT;
 }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) */
 
 /**
  * wcnss_prealloc_get() - Get preallocated memory from a pool
@@ -242,10 +248,8 @@ void *wcnss_prealloc_get(size_t size)
 		for (i = 0; i < ARRAY_SIZE(cnss_pools); i++) {
 			if (cnss_pools[i].size >= size && cnss_pools[i].mp) {
 				mem = mempool_alloc(cnss_pools[i].mp, gfp_mask);
-				if (mem) {
-					cnss_pool_put_cache_in_mem(mem, cnss_pools[i].cache);
+				if (mem)
 					break;
-				}
 			}
 		}
 	}

+ 20 - 2
icnss2/main.c

@@ -486,8 +486,12 @@ static int icnss_send_smp2p(struct icnss_priv *priv,
 		return ret;
 	}
 
-	if (test_bit(ICNSS_FW_DOWN, &priv->state))
-		return -ENODEV;
+	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
+	    !test_bit(ICNSS_FW_READY, &priv->state)) {
+		icnss_pr_smp2p("FW down, ignoring sending SMP2P state: 0x%lx\n",
+				  priv->state);
+		return -EINVAL;
+	}
 
 	value |= priv->smp2p_info[smp2p_entry].seq++;
 	value <<= ICNSS_SMEM_SEQ_NO_POS;
@@ -3303,6 +3307,13 @@ int icnss_force_wake_request(struct device *dev)
 		return -EINVAL;
 	}
 
+	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
+	    !test_bit(ICNSS_FW_READY, &priv->state)) {
+		icnss_pr_soc_wake("FW down, ignoring SOC Wake request state: 0x%lx\n",
+				  priv->state);
+		return -EINVAL;
+	}
+
 	if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) {
 		icnss_pr_soc_wake("SOC already awake, Ref count: %d",
 				  atomic_read(&priv->soc_wake_ref_count));
@@ -3332,6 +3343,13 @@ int icnss_force_wake_release(struct device *dev)
 		return -EINVAL;
 	}
 
+	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
+	    !test_bit(ICNSS_FW_READY, &priv->state)) {
+		icnss_pr_soc_wake("FW down, ignoring SOC Wake release state: 0x%lx\n",
+				  priv->state);
+		return -EINVAL;
+	}
+
 	icnss_pr_soc_wake("Calling SOC Wake response");
 
 	if (atomic_read(&priv->soc_wake_ref_count) &&
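
The same firmware-state guard is now repeated in icnss_send_smp2p(), icnss_force_wake_request(), and icnss_force_wake_release(). A small predicate like the one below (a sketch, not part of this change) would keep the three call sites consistent:

static bool icnss_fw_is_usable(struct icnss_priv *priv)
{
	/* Usable only when FW is not down and has reported ready */
	return !test_bit(ICNSS_FW_DOWN, &priv->state) &&
	       test_bit(ICNSS_FW_READY, &priv->state);
}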

+ 5 - 2
inc/cnss_prealloc.h

@@ -1,12 +1,15 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2016,2019 The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2015-2016,2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
 
 #ifndef _NET_CNSS_PREALLOC_H_
 #define _NET_CNSS_PREALLOC_H_
 
 #include <linux/types.h>
 
-#define WCNSS_PRE_ALLOC_GET_THRESHOLD (4*1024)
+#define WCNSS_PRE_ALLOC_GET_THRESHOLD (8*1024)
 
 extern void *wcnss_prealloc_get(size_t size);
 extern int wcnss_prealloc_put(void *ptr);
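
For context, a hedged usage sketch of this API around the raised threshold. The kmalloc fallback policy and the wcnss_prealloc_put() return convention shown here are assumptions for illustration, not something this commit defines:

#include <linux/slab.h>
#include <net/cnss_prealloc.h>

/* Hypothetical caller: use the pre-reserved pools only for requests larger
 * than WCNSS_PRE_ALLOC_GET_THRESHOLD (now 8 KB), else plain kmalloc.
 */
static void *wlan_buf_alloc(size_t size)
{
	void *buf = NULL;

	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD)
		buf = wcnss_prealloc_get(size);
	if (!buf)
		buf = kmalloc(size, GFP_KERNEL);
	return buf;
}

static void wlan_buf_free(void *buf)
{
	/* Assumed convention: put() returns nonzero only if buf came from
	 * a prealloc pool; otherwise release it through kfree().
	 */
	if (!wcnss_prealloc_put(buf))
		kfree(buf);
}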