@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/arch_topology.h>
 #include <linux/hash.h>
 #include <linux/msm_ion.h>
 #include <linux/qcom_scm.h>
@@ -141,6 +142,10 @@
 #define ION_FLAG_CACHED (1)
 #endif

+#ifndef topology_cluster_id
+#define topology_cluster_id(cpu) topology_physical_package_id(cpu)
+#endif
+
 /*
  * ctxid of every message is OR-ed with fastrpc_remote_pd_type before
  * it is sent to DSP. So mask 2 LSBs to retrieve actual context
@@ -2945,9 +2950,9 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 }

 /*
- * fastrpc_get_dsp_status - Reads the property string from device node
- * and updates the cdsp device avialbility status
- * if the node belongs to cdsp device.
+ * Reads the property string from device node
+ * and updates the cdsp device availability status
+ * if the node belongs to cdsp device.
  * @me : pointer to fastrpc_apps.
  */

@@ -2974,6 +2979,26 @@ static void fastrpc_get_dsp_status(struct fastrpc_apps *me)
 	} while (1);
 }

+/*
+ * Counts the number of cores corresponding
+ * to cluster id 0. If a core is defective or unavailable, skip counting
+ * that core.
+ * @me : pointer to fastrpc_apps.
+ */
+
+static void fastrpc_lowest_capacity_corecount(struct fastrpc_apps *me)
+{
+	unsigned int cpu = 0;
+
+	cpu = cpumask_first(cpu_possible_mask);
+	for_each_cpu(cpu, cpu_possible_mask) {
+		if (topology_cluster_id(cpu) == 0)
+			me->lowest_capacity_core_count++;
+	}
+	ADSPRPC_INFO("lowest capacity core count: %u\n",
+		me->lowest_capacity_core_count);
+}
+
 static void fastrpc_init(struct fastrpc_apps *me)
 {
 	int i, jj;
@@ -5842,13 +5867,13 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
 {
 	struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
 	struct fastrpc_apps *me = &gfa;
-	u32 ii;
+	unsigned int ii;

 	if (!fl)
 		return 0;

 	if (fl->qos_request && fl->dev_pm_qos_req) {
-		for (ii = 0; ii < me->silvercores.corecount; ii++) {
+		for (ii = 0; ii < me->lowest_capacity_core_count; ii++) {
 			if (!dev_pm_qos_request_active(&fl->dev_pm_qos_req[ii]))
 				continue;
 			dev_pm_qos_remove_request(&fl->dev_pm_qos_req[ii]);
@@ -6245,9 +6270,10 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
 	spin_lock_irqsave(&me->hlock, irq_flags);
 	hlist_add_head(&fl->hn, &me->drivers);
 	spin_unlock_irqrestore(&me->hlock, irq_flags);
-	fl->dev_pm_qos_req = kcalloc(me->silvercores.corecount,
-				sizeof(struct dev_pm_qos_request),
-				GFP_KERNEL);
+	if (me->lowest_capacity_core_count)
+		fl->dev_pm_qos_req = kcalloc(me->lowest_capacity_core_count,
+					sizeof(struct dev_pm_qos_request),
+					GFP_KERNEL);
 	spin_lock_init(&fl->dspsignals_lock);
 	mutex_init(&fl->signal_create_mutex);
 	init_completion(&fl->shutdown);
@@ -6445,7 +6471,7 @@ int fastrpc_internal_control(struct fastrpc_file *fl,
 	unsigned int latency;
 	struct fastrpc_apps *me = &gfa;
 	int sessionid = 0;
-	u32 silver_core_count = me->silvercores.corecount, ii = 0, cpu;
+	unsigned int cpu;
 	unsigned long flags = 0;

 	VERIFY(err, !IS_ERR_OR_NULL(fl) && !IS_ERR_OR_NULL(fl->apps));
@@ -6469,23 +6495,29 @@ int fastrpc_internal_control(struct fastrpc_file *fl,
 			goto bail;
 		}

-		VERIFY(err, me->silvercores.coreno && fl->dev_pm_qos_req);
+		VERIFY(err, (me->lowest_capacity_core_count && fl->dev_pm_qos_req));
 		if (err) {
+			ADSPRPC_INFO("Skipping PM QoS latency voting, core count: %u\n",
+				me->lowest_capacity_core_count);
 			err = -EINVAL;
 			goto bail;
 		}
+		/*
+		 * Add voting request for all possible cores corresponding to cluster
+		 * id 0. If DT property 'qcom,single-core-latency-vote' is enabled
+		 * then add voting request for only one core of cluster id 0.
+		 */
+		for (cpu = 0; cpu < me->lowest_capacity_core_count; cpu++) {

-		for (ii = 0; ii < silver_core_count; ii++) {
-			cpu = me->silvercores.coreno[ii];
 			if (!fl->qos_request) {
 				err = dev_pm_qos_add_request(
 						get_cpu_device(cpu),
-						&fl->dev_pm_qos_req[ii],
+						&fl->dev_pm_qos_req[cpu],
 						DEV_PM_QOS_RESUME_LATENCY,
 						latency);
 			} else {
 				err = dev_pm_qos_update_request(
-					&fl->dev_pm_qos_req[ii],
+					&fl->dev_pm_qos_req[cpu],
 					latency);
 			}
 			/* PM QoS request APIs return 0 or 1 on success */
@@ -6499,7 +6531,6 @@ int fastrpc_internal_control(struct fastrpc_file *fl,
 			fl->qos_request = 1;
 			err = 0;
 		}
-
 		/* Ensure CPU feature map updated to DSP for early WakeUp */
 		fastrpc_send_cpuinfo_to_dsp(fl);
 		break;
@@ -7931,39 +7962,6 @@ bail:
 	}
 }

-static void init_qos_cores_list(struct device *dev, char *prop_name,
-					struct qos_cores *silvercores)
-{
-	int err = 0;
-	u32 len = 0, i = 0;
-	u32 *coreslist = NULL;
-
-	if (!of_find_property(dev->of_node, prop_name, &len))
-		goto bail;
-	if (len == 0)
-		goto bail;
-	len /= sizeof(u32);
-	VERIFY(err, NULL != (coreslist = kcalloc(len, sizeof(u32),
-						GFP_KERNEL)));
-	if (err)
-		goto bail;
-	for (i = 0; i < len; i++) {
-		err = of_property_read_u32_index(dev->of_node, prop_name, i,
-						&coreslist[i]);
-		if (err) {
-			pr_err("adsprpc: %s: failed to read QOS cores list\n",
-				__func__);
-			goto bail;
-		}
-	}
-	silvercores->coreno = coreslist;
-	silvercores->corecount = len;
-bail:
-	if (err)
-		kfree(coreslist);
-
-}
-
 static void fastrpc_init_privileged_gids(struct device *dev, char *prop_name,
 					struct gid_list *gidlist)
 {
@@ -8125,9 +8123,14 @@ static int fastrpc_probe(struct platform_device *pdev)
 							&gcinfo[0].rhvm);
 		fastrpc_init_privileged_gids(dev, "qcom,fastrpc-gids",
 					&me->gidlist);
-		init_qos_cores_list(dev, "qcom,qos-cores",
-					&me->silvercores);
-
+		/*
+		 * Check if latency voting for only one core
+		 * is enabled for the platform
+		 */
+		me->single_core_latency_vote = of_property_read_bool(dev->of_node,
+					"qcom,single-core-latency-vote");
+		if (me->single_core_latency_vote)
+			me->lowest_capacity_core_count = 1;
 		of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
 					&me->latency);
 		if (of_get_property(dev->of_node,
@@ -8677,6 +8680,7 @@ static int __init fastrpc_device_init(void)
 		goto bus_device_register_bail;
 	}
 	me->fastrpc_bus_register = true;
+	fastrpc_lowest_capacity_corecount(me);
 	VERIFY(err, 0 == platform_driver_register(&fastrpc_driver));
 	if (err)
 		goto register_bail;