Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM SoC driver updates from Arnd Bergmann:
 "The main addition this time around is the new ARM "SCMI" framework,
  which is the latest in a series of standards coming from ARM to do
   power management in a platform-independent way.

  This has been through many review cycles, and it relies on a rather
  interesting way of using the mailbox subsystem, but in the end I
  agreed that Sudeep's version was the best we could do after all.

  Other changes include:

   - the ARM CCN driver is moved out of drivers/bus into drivers/perf,
     which makes more sense. Similarly, the performance monitoring
     portion of the CCI driver is moved the same way and cleaned up a
     little more.

   - a series of updates to the SCPI framework

   - support for the Mediatek mt7623a SoC in drivers/soc

   - support for additional NVIDIA Tegra hardware in drivers/soc

   - a new reset driver for Socionext Uniphier

   - lesser bug fixes in drivers/soc, drivers/tee, drivers/memory,
     drivers/firmware, and drivers/reset across platforms"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (87 commits)
  reset: uniphier: add ethernet reset control support for PXs3
  reset: stm32mp1: Enable stm32mp1 reset driver
  dt-bindings: reset: add STM32MP1 resets
  reset: uniphier: add Pro4/Pro5/PXs2 audio systems reset control
  reset: imx7: add 'depends on HAS_IOMEM' to fix unmet dependency
  reset: modify the way reset lookup works for board files
  reset: add support for non-DT systems
  clk: scmi: use devm_of_clk_add_hw_provider() API and drop scmi_clocks_remove
  firmware: arm_scmi: prevent accessing rate_discrete uninitialized
  hwmon: (scmi) return -EINVAL when sensor information is unavailable
  amlogic: meson-gx-socinfo: Update soc ids
  soc/tegra: pmc: Use the new reset APIs to manage reset controllers
  soc: mediatek: update power domain data of MT2712
  dt-bindings: soc: update MT2712 power dt-bindings
  cpufreq: scmi: add thermal dependency
  soc: mediatek: fix the mistaken pointer accessed when subdomains are added
  soc: mediatek: add SCPSYS power domain driver for MediaTek MT7623A SoC
  soc: mediatek: avoid hardcoded value with bus_prot_mask
  dt-bindings: soc: add header files required for MT7623A SCPSYS dt-binding
  dt-bindings: soc: add SCPSYS binding for MT7623 and MT7623A SoC
  ...
Committed by Linus Torvalds on 2018-04-05 21:29:35 -07:00.
79 changed files with 6861 additions and 2189 deletions

diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig

@@ -19,6 +19,40 @@ config ARM_PSCI_CHECKER
on and off through hotplug, so for now torture tests and PSCI checker
are mutually exclusive.
config ARM_SCMI_PROTOCOL
bool "ARM System Control and Management Interface (SCMI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST
depends on MAILBOX
help
ARM System Control and Management Interface (SCMI) protocol is a
set of operating system-independent software interfaces that are
used in system management. SCMI is extensible and currently provides
interfaces for: Discovery and self-description of the interfaces
it supports, Power domain management which is the ability to place
a given device or domain into the various power-saving states that
it supports, Performance management which is the ability to control
the performance of a domain that is composed of compute engines
such as application processors and other accelerators, Clock
management which is the ability to set and inquire rates on platform
managed clocks and Sensor management which is the ability to read
sensor data and be notified of sensor value changes.
This protocol library provides an interface for all the client drivers
making use of the features offered by SCMI.
config ARM_SCMI_POWER_DOMAIN
tristate "SCMI power domain driver"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
default y
select PM_GENERIC_DOMAINS if PM
help
This enables support for the SCMI power domains which can be
enabled or disabled via the SCP firmware.
This driver can also be built as a module. If so, the module
will be called scmi_pm_domain. Note this may be needed early in boot
before the rootfs is available.
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST

diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile

@@ -25,6 +25,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/

diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile

@@ -0,0 +1,5 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o

diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c

@@ -0,0 +1,253 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Base Protocol
*
* Copyright (C) 2018 ARM Ltd.
*/
#include "common.h"
enum scmi_base_protocol_cmd {
BASE_DISCOVER_VENDOR = 0x3,
BASE_DISCOVER_SUB_VENDOR = 0x4,
BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
BASE_DISCOVER_AGENT = 0x7,
BASE_NOTIFY_ERRORS = 0x8,
};
struct scmi_msg_resp_base_attributes {
u8 num_protocols;
u8 num_agents;
__le16 reserved;
};
/**
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
*
* @handle - SCMI entity handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int scmi_base_attributes_get(const struct scmi_handle *handle)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_base_attributes *attr_info;
struct scmi_revision_info *rev = handle->version;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t);
if (ret)
return ret;
ret = scmi_do_xfer(handle, t);
if (!ret) {
attr_info = t->rx.buf;
rev->num_protocols = attr_info->num_protocols;
rev->num_agents = attr_info->num_agents;
}
scmi_one_xfer_put(handle, t);
return ret;
}
/**
* scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
*
* @handle - SCMI entity handle
* @sub_vendor - specify true if sub-vendor ID is needed
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
scmi_base_vendor_id_get(const struct scmi_handle *handle, bool sub_vendor)
{
u8 cmd;
int ret, size;
char *vendor_id;
struct scmi_xfer *t;
struct scmi_revision_info *rev = handle->version;
if (sub_vendor) {
cmd = BASE_DISCOVER_SUB_VENDOR;
vendor_id = rev->sub_vendor_id;
size = ARRAY_SIZE(rev->sub_vendor_id);
} else {
cmd = BASE_DISCOVER_VENDOR;
vendor_id = rev->vendor_id;
size = ARRAY_SIZE(rev->vendor_id);
}
ret = scmi_one_xfer_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t);
if (ret)
return ret;
ret = scmi_do_xfer(handle, t);
if (!ret)
memcpy(vendor_id, t->rx.buf, size);
scmi_one_xfer_put(handle, t);
return ret;
}
/**
* scmi_base_implementation_version_get() - gets a vendor-specific
* 32-bit implementation version; the format of the version number is
* vendor-defined
*
* @handle - SCMI entity handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
scmi_base_implementation_version_get(const struct scmi_handle *handle)
{
int ret;
__le32 *impl_ver;
struct scmi_xfer *t;
struct scmi_revision_info *rev = handle->version;
ret = scmi_one_xfer_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION,
SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t);
if (ret)
return ret;
ret = scmi_do_xfer(handle, t);
if (!ret) {
impl_ver = t->rx.buf;
rev->impl_ver = le32_to_cpu(*impl_ver);
}
scmi_one_xfer_put(handle, t);
return ret;
}
/**
* scmi_base_implementation_list_get() - gets the list of protocols
* the OSPM is allowed to access
*
* @handle - SCMI entity handle
* @protocols_imp - pointer to hold the list of protocol identifiers
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
u8 *protocols_imp)
{
u8 *list;
int ret, loop;
struct scmi_xfer *t;
__le32 *num_skip, *num_ret;
u32 tot_num_ret = 0, loop_num_ret;
struct device *dev = handle->dev;
ret = scmi_one_xfer_init(handle, BASE_DISCOVER_LIST_PROTOCOLS,
SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t);
if (ret)
return ret;
num_skip = t->tx.buf;
num_ret = t->rx.buf;
list = t->rx.buf + sizeof(*num_ret);
do {
/* Set the number of protocols to be skipped/already read */
*num_skip = cpu_to_le32(tot_num_ret);
ret = scmi_do_xfer(handle, t);
if (ret)
break;
loop_num_ret = le32_to_cpu(*num_ret);
if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
break;
}
for (loop = 0; loop < loop_num_ret; loop++)
protocols_imp[tot_num_ret + loop] = *(list + loop);
tot_num_ret += loop_num_ret;
} while (loop_num_ret);
scmi_one_xfer_put(handle, t);
return ret;
}
/**
* scmi_base_discover_agent_get() - discover the name of an agent
*
* @handle - SCMI entity handle
* @id - Agent identifier
* @name - Agent identifier ASCII string
*
* An agent id of 0 is reserved to identify the platform itself.
* Generally the operating system is represented as "OSPM".
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
int id, char *name)
{
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, BASE_DISCOVER_AGENT,
SCMI_PROTOCOL_BASE, sizeof(__le32),
SCMI_MAX_STR_SIZE, &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(id);
ret = scmi_do_xfer(handle, t);
if (!ret)
memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
scmi_one_xfer_put(handle, t);
return ret;
}
int scmi_base_protocol_init(struct scmi_handle *h)
{
int id, ret;
u8 *prot_imp;
u32 version;
char name[SCMI_MAX_STR_SIZE];
const struct scmi_handle *handle = h;
struct device *dev = handle->dev;
struct scmi_revision_info *rev = handle->version;
ret = scmi_version_get(handle, SCMI_PROTOCOL_BASE, &version);
if (ret)
return ret;
prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
if (!prot_imp)
return -ENOMEM;
rev->major_ver = PROTOCOL_REV_MAJOR(version),
rev->minor_ver = PROTOCOL_REV_MINOR(version);
scmi_base_attributes_get(handle);
scmi_base_vendor_id_get(handle, false);
scmi_base_vendor_id_get(handle, true);
scmi_base_implementation_version_get(handle);
scmi_base_implementation_list_get(handle, prot_imp);
scmi_setup_protocol_implemented(handle, prot_imp);
dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
rev->major_ver, rev->minor_ver, rev->vendor_id,
rev->sub_vendor_id, rev->impl_ver);
dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
rev->num_agents);
for (id = 0; id < rev->num_agents; id++) {
scmi_base_discover_agent_get(handle, id, name);
dev_dbg(dev, "Agent %d: %s\n", id, name);
}
return 0;
}

diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c

@@ -0,0 +1,221 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Protocol bus layer
*
* Copyright (C) 2018 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "common.h"
static DEFINE_IDA(scmi_bus_id);
static DEFINE_IDR(scmi_protocols);
static DEFINE_SPINLOCK(protocol_lock);
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
{
const struct scmi_device_id *id = scmi_drv->id_table;
if (!id)
return NULL;
for (; id->protocol_id; id++)
if (id->protocol_id == scmi_dev->protocol_id)
return id;
return NULL;
}
static int scmi_dev_match(struct device *dev, struct device_driver *drv)
{
struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id;
id = scmi_dev_match_id(scmi_dev, scmi_drv);
if (id)
return 1;
return 0;
}
static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
{
scmi_prot_init_fn_t fn = idr_find(&scmi_protocols, protocol_id);
if (unlikely(!fn))
return -EINVAL;
return fn(handle);
}
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id;
int ret;
id = scmi_dev_match_id(scmi_dev, scmi_drv);
if (!id)
return -ENODEV;
if (!scmi_dev->handle)
return -EPROBE_DEFER;
ret = scmi_protocol_init(scmi_dev->protocol_id, scmi_dev->handle);
if (ret)
return ret;
return scmi_drv->probe(scmi_dev);
}
static int scmi_dev_remove(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
if (scmi_drv->remove)
scmi_drv->remove(scmi_dev);
return 0;
}
static struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
};
int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
const char *mod_name)
{
int retval;
driver->driver.bus = &scmi_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
driver->driver.mod_name = mod_name;
retval = driver_register(&driver->driver);
if (!retval)
pr_debug("registered new scmi driver %s\n", driver->name);
return retval;
}
EXPORT_SYMBOL_GPL(scmi_driver_register);
void scmi_driver_unregister(struct scmi_driver *driver)
{
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
struct scmi_device *
scmi_device_create(struct device_node *np, struct device *parent, int protocol)
{
int id, retval;
struct scmi_device *scmi_dev;
id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
if (id < 0)
return NULL;
scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
if (!scmi_dev)
goto no_mem;
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
scmi_dev->dev.parent = parent;
scmi_dev->dev.of_node = np;
scmi_dev->dev.bus = &scmi_bus_type;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
retval = device_register(&scmi_dev->dev);
if (!retval)
return scmi_dev;
put_device(&scmi_dev->dev);
kfree(scmi_dev);
no_mem:
ida_simple_remove(&scmi_bus_id, id);
return NULL;
}
void scmi_device_destroy(struct scmi_device *scmi_dev)
{
scmi_handle_put(scmi_dev->handle);
device_unregister(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
kfree(scmi_dev);
}
void scmi_set_handle(struct scmi_device *scmi_dev)
{
scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
}
int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn)
{
int ret;
spin_lock(&protocol_lock);
ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1,
GFP_ATOMIC);
if (ret != protocol_id)
pr_err("unable to allocate SCMI idr slot, err %d\n", ret);
spin_unlock(&protocol_lock);
return ret;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
void scmi_protocol_unregister(int protocol_id)
{
spin_lock(&protocol_lock);
idr_remove(&scmi_protocols, protocol_id);
spin_unlock(&protocol_lock);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
static int __scmi_devices_unregister(struct device *dev, void *data)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
scmi_device_destroy(scmi_dev);
return 0;
}
static void scmi_devices_unregister(void)
{
bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
}
static int __init scmi_bus_init(void)
{
int retval;
retval = bus_register(&scmi_bus_type);
if (retval)
pr_err("scmi protocol bus register failed (%d)\n", retval);
return retval;
}
subsys_initcall(scmi_bus_init);
static void __exit scmi_bus_exit(void)
{
scmi_devices_unregister();
bus_unregister(&scmi_bus_type);
ida_destroy(&scmi_bus_id);
}
module_exit(scmi_bus_exit);

diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c

@@ -0,0 +1,343 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Clock Protocol
*
* Copyright (C) 2018 ARM Ltd.
*/
#include "common.h"
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
CLOCK_DESCRIBE_RATES = 0x4,
CLOCK_RATE_SET = 0x5,
CLOCK_RATE_GET = 0x6,
CLOCK_CONFIG_SET = 0x7,
};
struct scmi_msg_resp_clock_protocol_attributes {
__le16 num_clocks;
u8 max_async_req;
u8 reserved;
};
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
u8 name[SCMI_MAX_STR_SIZE];
};
struct scmi_clock_set_config {
__le32 id;
__le32 attributes;
};
struct scmi_msg_clock_describe_rates {
__le32 id;
__le32 rate_index;
};
struct scmi_msg_resp_clock_describe_rates {
__le32 num_rates_flags;
#define NUM_RETURNED(x) ((x) & 0xfff)
#define RATE_DISCRETE(x) !((x) & BIT(12))
#define NUM_REMAINING(x) ((x) >> 16)
struct {
__le32 value_low;
__le32 value_high;
} rate[0];
#define RATE_TO_U64(X) \
({ \
typeof(X) x = (X); \
le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
struct scmi_clock_set_rate {
__le32 flags;
#define CLOCK_SET_ASYNC BIT(0)
#define CLOCK_SET_DELAYED BIT(1)
#define CLOCK_SET_ROUND_UP BIT(2)
#define CLOCK_SET_ROUND_AUTO BIT(3)
__le32 id;
__le32 value_low;
__le32 value_high;
};
struct clock_info {
int num_clocks;
int max_async_req;
struct scmi_clock_info *clk;
};
static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
struct clock_info *ci)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_protocol_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
ci->num_clocks = le16_to_cpu(attr->num_clocks);
ci->max_async_req = attr->max_async_req;
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_clock_attributes_get(const struct scmi_handle *handle,
u32 clk_id, struct scmi_clock_info *clk)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
ret = scmi_one_xfer_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
sizeof(clk_id), sizeof(*attr), &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret)
memcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
else
clk->name[0] = '\0';
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
struct scmi_clock_info *clk)
{
u64 *rate;
int ret, cnt;
bool rate_discrete = false;
u32 tot_rate_cnt = 0, rates_flag;
u16 num_returned, num_remaining;
struct scmi_xfer *t;
struct scmi_msg_clock_describe_rates *clk_desc;
struct scmi_msg_resp_clock_describe_rates *rlist;
ret = scmi_one_xfer_init(handle, CLOCK_DESCRIBE_RATES,
SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
if (ret)
return ret;
clk_desc = t->tx.buf;
rlist = t->rx.buf;
do {
clk_desc->id = cpu_to_le32(clk_id);
/* Set the number of rates to be skipped/already read */
clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
ret = scmi_do_xfer(handle, t);
if (ret)
goto err;
rates_flag = le32_to_cpu(rlist->num_rates_flags);
num_remaining = NUM_REMAINING(rates_flag);
rate_discrete = RATE_DISCRETE(rates_flag);
num_returned = NUM_RETURNED(rates_flag);
if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
break;
}
if (!rate_discrete) {
clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
clk->range.min_rate, clk->range.max_rate,
clk->range.step_size);
break;
}
rate = &clk->list.rates[tot_rate_cnt];
for (cnt = 0; cnt < num_returned; cnt++, rate++) {
*rate = RATE_TO_U64(rlist->rate[cnt]);
dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
}
tot_rate_cnt += num_returned;
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
if (rate_discrete)
clk->list.num_rates = tot_rate_cnt;
err:
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
sizeof(__le32), sizeof(u64), &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
ret = scmi_do_xfer(handle, t);
if (!ret) {
__le32 *pval = t->rx.buf;
*value = le32_to_cpu(*pval);
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
u32 config, u64 rate)
{
int ret;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
ret = scmi_one_xfer_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->flags = cpu_to_le32(config);
cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
int ret;
struct scmi_xfer *t;
struct scmi_clock_set_config *cfg;
ret = scmi_one_xfer_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}
static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
return scmi_clock_config_set(handle, clk_id, 0);
}
static int scmi_clock_count_get(const struct scmi_handle *handle)
{
struct clock_info *ci = handle->clk_priv;
return ci->num_clocks;
}
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
struct clock_info *ci = handle->clk_priv;
struct scmi_clock_info *clk = ci->clk + clk_id;
if (!clk->name || !clk->name[0])
return NULL;
return clk;
}
static struct scmi_clk_ops clk_ops = {
.count_get = scmi_clock_count_get,
.info_get = scmi_clock_info_get,
.rate_get = scmi_clock_rate_get,
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
};
static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
u32 version;
int clkid, ret;
struct clock_info *cinfo;
scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
dev_dbg(handle->dev, "Clock Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
scmi_clock_protocol_attributes_get(handle, cinfo);
cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
sizeof(*cinfo->clk), GFP_KERNEL);
if (!cinfo->clk)
return -ENOMEM;
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
ret = scmi_clock_attributes_get(handle, clkid, clk);
if (!ret)
scmi_clock_describe_rates_get(handle, clkid, clk);
}
handle->clk_ops = &clk_ops;
handle->clk_priv = cinfo;
return 0;
}
static int __init scmi_clock_init(void)
{
return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
&scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);

diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h

@@ -0,0 +1,105 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Protocol
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
* Copyright (C) 2018 ARM Ltd.
*/
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
#define PROTOCOL_REV_MINOR_BITS 16
#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS)
#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK)
#define MAX_PROTOCOLS_IMP 16
#define MAX_OPPS 16
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
};
/**
* struct scmi_msg_resp_prot_version - Response for a message
*
* @major_version: Major version of the ABI that firmware supports
* @minor_version: Minor version of the ABI that firmware supports
*
* In general, ABI version changes follow the rule that minor version increments
* are backward compatible. Major revision changes in ABI may not be
* backward compatible.
*
* Response to a generic message with message type SCMI_MSG_VERSION
*/
struct scmi_msg_resp_prot_version {
__le16 minor_version;
__le16 major_version;
};
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
* @id: The identifier of the command being sent
* @protocol_id: The identifier of the protocol used to send @id command
* @seq: The token to identify the message. When a message/command returns,
* the platform returns the whole message header unmodified including
* the token.
* @status: Status of the transfer once it is complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion rather than signalled by interrupt
*/
struct scmi_msg_hdr {
u8 id;
u8 protocol_id;
u16 seq;
u32 status;
bool poll_completion;
};
/**
* struct scmi_msg - Message(Tx/Rx) structure
*
* @buf: Buffer pointer
* @len: Length of data in the buffer
*/
struct scmi_msg {
void *buf;
size_t len;
};
/**
* struct scmi_xfer - Structure representing a message flow
*
* @hdr: Transmit message header
* @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: completion event
*/
struct scmi_xfer {
void *con_priv;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
};
void scmi_one_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_one_xfer_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
int scmi_version_get(const struct scmi_handle *h, u8 protocol, u32 *version);
void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
u8 *prot_imp);
int scmi_base_protocol_init(struct scmi_handle *h);

diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c

@@ -0,0 +1,871 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Protocol driver
*
* SCMI Message Protocol is used between the System Control Processor(SCP)
* and the Application Processors(AP). The Message Handling Unit(MHU)
* provides a mechanism for inter-processor communication between SCP's
* Cortex M3 and AP.
*
* SCP offers control and management of the core/cluster power states,
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
* Copyright (C) 2018 ARM Ltd.
*/
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include "common.h"
#define MSG_ID_SHIFT 0
#define MSG_ID_MASK 0xff
#define MSG_TYPE_SHIFT 8
#define MSG_TYPE_MASK 0x3
#define MSG_PROTOCOL_ID_SHIFT 10
#define MSG_PROTOCOL_ID_MASK 0xff
#define MSG_TOKEN_ID_SHIFT 18
#define MSG_TOKEN_ID_MASK 0x3ff
#define MSG_XTRACT_TOKEN(header) \
(((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK)
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
SCMI_ERR_SUPPORT = -1, /* Not supported */
SCMI_ERR_PARAMS = -2, /* Invalid Parameters */
SCMI_ERR_ACCESS = -3, /* Invalid access/permission denied */
SCMI_ERR_ENTRY = -4, /* Not found */
SCMI_ERR_RANGE = -5, /* Value out of range */
SCMI_ERR_BUSY = -6, /* Device busy */
SCMI_ERR_COMMS = -7, /* Communication Error */
SCMI_ERR_GENERIC = -8, /* Generic Error */
SCMI_ERR_HARDWARE = -9, /* Hardware Error */
SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
SCMI_ERR_MAX
};
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/**
* struct scmi_xfers_info - Structure to manage transfer information
*
* @xfer_block: Preallocated Message array
* @xfer_alloc_table: Bitmap table for allocated messages.
* Index of this bitmap table is also used for message
* sequence identifier.
* @xfer_lock: Protection for message allocation
*/
struct scmi_xfers_info {
struct scmi_xfer *xfer_block;
unsigned long *xfer_alloc_table;
/* protect transfer allocation */
spinlock_t xfer_lock;
};
/**
* struct scmi_desc - Description of SoC integration
*
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msg: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
*/
struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
};
/**
* struct scmi_chan_info - Structure representing SCMI channel information
*
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox channel
* @payload: Transmit/Receive mailbox channel payload area
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: SCMI handle associated with this channel
*/
struct scmi_chan_info {
struct mbox_client cl;
struct mbox_chan *chan;
void __iomem *payload;
struct device *dev;
struct scmi_handle *handle;
};
/**
* struct scmi_info - Structure representing a SCMI instance
*
* @dev: Device pointer
* @desc: SoC description for this instance
* @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
* @minfo: Message info
* @tx_idr: IDR object to map protocol id to channel info pointer
* @protocols_imp: list of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @node: list head
* @users: Number of users of this instance
*/
struct scmi_info {
struct device *dev;
const struct scmi_desc *desc;
struct scmi_revision_info version;
struct scmi_handle handle;
struct scmi_xfers_info minfo;
struct idr tx_idr;
u8 *protocols_imp;
struct list_head node;
int users;
};
#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
/*
* SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian
* format only.
*/
struct scmi_shared_mem {
__le32 reserved;
__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
__le32 reserved1[2];
__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
__le32 length;
__le32 msg_header;
u8 msg_payload[0];
};
static const int scmi_linux_errmap[] = {
/* better than switch case as long as return value is continuous */
0, /* SCMI_SUCCESS */
-EOPNOTSUPP, /* SCMI_ERR_SUPPORT */
-EINVAL, /* SCMI_ERR_PARAMS */
-EACCES, /* SCMI_ERR_ACCESS */
-ENOENT, /* SCMI_ERR_ENTRY */
-ERANGE, /* SCMI_ERR_RANGE */
-EBUSY, /* SCMI_ERR_BUSY */
-ECOMM, /* SCMI_ERR_COMMS */
-EIO, /* SCMI_ERR_GENERIC */
-EREMOTEIO, /* SCMI_ERR_HARDWARE */
-EPROTO, /* SCMI_ERR_PROTOCOL */
};
static inline int scmi_to_linux_errno(int errno)
{
if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
return scmi_linux_errmap[-errno];
return -EIO;
}
/**
* scmi_dump_header_dbg() - Helper to dump a message header.
*
* @dev: Device pointer corresponding to the SCMI entity
* @hdr: pointer to header.
*/
static inline void scmi_dump_header_dbg(struct device *dev,
struct scmi_msg_hdr *hdr)
{
dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
hdr->id, hdr->seq, hdr->protocol_id);
}
static void scmi_fetch_response(struct scmi_xfer *xfer,
struct scmi_shared_mem __iomem *mem)
{
xfer->hdr.status = ioread32(mem->msg_payload);
/* Skip the length of the header and status in the payload area, i.e. 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}
/**
* scmi_rx_callback() - mailbox client callback for receive messages
*
* @cl: client pointer
* @m: mailbox message
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
*
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
u16 xfer_id;
struct scmi_xfer *xfer;
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->minfo;
struct scmi_shared_mem __iomem *mem = cinfo->payload;
xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
/*
* Are we even expecting this?
*/
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
return;
}
xfer = &minfo->xfer_block[xfer_id];
scmi_dump_header_dbg(dev, &xfer->hdr);
/* Is the message of valid length? */
if (xfer->rx.len > info->desc->max_msg_size) {
dev_err(dev, "unable to handle %zu xfer(max %d)\n",
xfer->rx.len, info->desc->max_msg_size);
return;
}
scmi_fetch_response(xfer, mem);
complete(&xfer->done);
}
/**
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) |
((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) |
((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT);
}
/**
* scmi_tx_prepare() - mailbox client callback to prepare for the transfer
*
* @cl: client pointer
* @m: mailbox message
*
* This function prepares the shared memory which contains the header and the
* payload.
*/
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_xfer *t = m;
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct scmi_shared_mem __iomem *mem = cinfo->payload;
/* Mark channel busy + clear error */
iowrite32(0x0, &mem->channel_status);
iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
&mem->flags);
iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
if (t->tx.buf)
memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}
/**
* scmi_one_xfer_get() - Allocate one message
*
* @handle: SCMI entity handle
*
* Helper function which is used by various command functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
* This function can sleep depending on pending requests already in the system
* for the SCMI entity. Further, this also holds a spinlock to maintain
* integrity of internal data structures.
*
* Return: pointer to the allocated xfer on success, else ERR_PTR(-ENOMEM).
*/
static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle)
{
u16 xfer_id;
struct scmi_xfer *xfer;
unsigned long flags, bit_pos;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->minfo;
/* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags);
bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
info->desc->max_msg);
if (bit_pos == info->desc->max_msg) {
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return ERR_PTR(-ENOMEM);
}
set_bit(bit_pos, minfo->xfer_alloc_table);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
xfer_id = bit_pos;
xfer = &minfo->xfer_block[xfer_id];
xfer->hdr.seq = xfer_id;
reinit_completion(&xfer->done);
return xfer;
}
/**
* scmi_one_xfer_put() - Release a message
*
* @handle: SCMI entity handle
* @xfer: message that was reserved by scmi_one_xfer_get
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
unsigned long flags;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->minfo;
/*
* Keep the locked section as small as possible
* NOTE: we might escape with smp_mb and no lock here..
* but just be conservative and symmetric.
*/
spin_lock_irqsave(&minfo->xfer_lock, flags);
clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
struct scmi_shared_mem __iomem *mem = cinfo->payload;
u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
if (xfer->hdr.seq != xfer_id)
return false;
return ioread32(&mem->channel_status) &
(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
ktime_t __cur = ktime_get();
return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}
/**
* scmi_do_xfer() - Do one transfer
*
* @handle: SCMI entity handle
* @xfer: Transfer to initiate and wait for response
*
* Return: 0 on success, -ETIMEDOUT if no response was received, or the
* corresponding error if the transmit itself failed.
*/
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
int ret;
int timeout;
struct scmi_info *info = handle_to_scmi_info(handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
if (unlikely(!cinfo))
return -EINVAL;
ret = mbox_send_message(cinfo->chan, xfer);
if (ret < 0) {
dev_dbg(dev, "mbox send fail %d\n", ret);
return ret;
}
/* mbox_send_message returns non-negative value on success, so reset */
ret = 0;
if (xfer->hdr.poll_completion) {
ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
if (ktime_before(ktime_get(), stop))
scmi_fetch_response(xfer, cinfo->payload);
else
ret = -ETIMEDOUT;
} else {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) {
dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
}
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(cinfo->chan, ret);
return ret;
}
/**
* scmi_one_xfer_init() - Allocate and initialise one message
*
* @handle: SCMI entity handle
* @msg_id: Message identifier
* @prot_id: Protocol identifier for the message
* @tx_size: transmit message size
* @rx_size: receive message size
* @p: pointer to the allocated and initialised message
*
* This function allocates the message using scmi_one_xfer_get() and
* initialises the header.
*
* Return: 0 if all went fine with @p pointing to message, else
* corresponding error.
*/
int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
int ret;
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(handle);
struct device *dev = info->dev;
/* Ensure we have sane transfer sizes */
if (rx_size > info->desc->max_msg_size ||
tx_size > info->desc->max_msg_size)
return -ERANGE;
xfer = scmi_one_xfer_get(handle);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
return ret;
}
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.id = msg_id;
xfer->hdr.protocol_id = prot_id;
xfer->hdr.poll_completion = false;
*p = xfer;
return 0;
}
/**
* scmi_version_get() - command to get the revision of the SCMI entity
*
* @handle: Handle to SCMI entity information
* @protocol: Protocol identifier of the entity to query
* @version: Holder for the returned version
*
* Updates the SCMI information in the internal data structure.
*
* Return: 0 if all went fine, else return appropriate error.
*/
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
u32 *version)
{
int ret;
__le32 *rev_info;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0,
sizeof(*version), &t);
if (ret)
return ret;
ret = scmi_do_xfer(handle, t);
if (!ret) {
rev_info = t->rx.buf;
*version = le32_to_cpu(*rev_info);
}
scmi_one_xfer_put(handle, t);
return ret;
}
void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
u8 *prot_imp)
{
struct scmi_info *info = handle_to_scmi_info(handle);
info->protocols_imp = prot_imp;
}
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
int i;
struct scmi_info *info = handle_to_scmi_info(handle);
if (!info->protocols_imp)
return false;
for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
if (info->protocols_imp[i] == prot_id)
return true;
return false;
}
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
* @dev: pointer to device for which we want SCMI handle
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of SCMI protocol library.
* scmi_handle_put must be balanced with successful scmi_handle_get
*
* Return: pointer to handle if successful, NULL on error
*/
struct scmi_handle *scmi_handle_get(struct device *dev)
{
struct list_head *p;
struct scmi_info *info;
struct scmi_handle *handle = NULL;
mutex_lock(&scmi_list_mutex);
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
handle = &info->handle;
info->users++;
break;
}
}
mutex_unlock(&scmi_list_mutex);
return handle;
}
/**
* scmi_handle_put() - Release the handle acquired by scmi_handle_get
*
* @handle: handle acquired by scmi_handle_get
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of SCMI protocol library.
* scmi_handle_put must be balanced with successful scmi_handle_get
*
* Return: 0 if successfully released, or -EINVAL if a NULL handle was
* passed.
*/
int scmi_handle_put(const struct scmi_handle *handle)
{
struct scmi_info *info;
if (!handle)
return -EINVAL;
info = handle_to_scmi_info(handle);
mutex_lock(&scmi_list_mutex);
if (!WARN_ON(!info->users))
info->users--;
mutex_unlock(&scmi_list_mutex);
return 0;
}
static const struct scmi_desc scmi_generic_desc = {
.max_rx_timeout_ms = 30, /* we may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, scmi_of_match);
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
struct scmi_xfers_info *info = &sinfo->minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) {
dev_err(dev, "Maximum message of %d exceeds supported %d\n",
desc->max_msg, MSG_TOKEN_ID_MASK + 1);
return -EINVAL;
}
info->xfer_block = devm_kcalloc(dev, desc->max_msg,
sizeof(*info->xfer_block), GFP_KERNEL);
if (!info->xfer_block)
return -ENOMEM;
info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
sizeof(long), GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
bitmap_zero(info->xfer_alloc_table, desc->max_msg);
/* Pre-initialize the buffer pointer to pre-allocated buffers */
for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
GFP_KERNEL);
if (!xfer->rx.buf)
return -ENOMEM;
xfer->tx.buf = xfer->rx.buf;
init_completion(&xfer->done);
}
spin_lock_init(&info->xfer_lock);
return 0;
}
static int scmi_mailbox_check(struct device_node *np)
{
struct of_phandle_args arg;
return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct idr *idr = data;
if (!IS_ERR_OR_NULL(cinfo->chan)) {
mbox_free_channel(cinfo->chan);
cinfo->chan = NULL;
}
idr_remove(idr, id);
return 0;
}
static int scmi_remove(struct platform_device *pdev)
{
int ret = 0;
struct scmi_info *info = platform_get_drvdata(pdev);
struct idr *idr = &info->tx_idr;
mutex_lock(&scmi_list_mutex);
if (info->users)
ret = -EBUSY;
else
list_del(&info->node);
mutex_unlock(&scmi_list_mutex);
if (!ret) {
/* Safe to free channels since no more users */
ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
idr_destroy(&info->tx_idr);
}
return ret;
}
static inline int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
int ret;
struct resource res;
resource_size_t size;
struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
struct mbox_client *cl;
if (scmi_mailbox_check(np)) {
cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
goto idr_alloc;
}
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
cinfo->dev = dev;
cl = &cinfo->cl;
cl->dev = dev;
cl->rx_callback = scmi_rx_callback;
cl->tx_prepare = scmi_tx_prepare;
cl->tx_block = false;
cl->knows_txdone = true;
shmem = of_parse_phandle(np, "shmem", 0);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
return ret;
}
size = resource_size(&res);
cinfo->payload = devm_ioremap(info->dev, res.start, size);
if (!cinfo->payload) {
dev_err(dev, "failed to ioremap SCMI Tx payload\n");
return -EADDRNOTAVAIL;
}
/* Transmit channel is first entry i.e. index 0 */
cinfo->chan = mbox_request_channel(cl, 0);
if (IS_ERR(cinfo->chan)) {
ret = PTR_ERR(cinfo->chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to request SCMI Tx mailbox\n");
return ret;
}
idr_alloc:
ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
return ret;
}
cinfo->handle = &info->handle;
return 0;
}
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
int prot_id)
{
struct scmi_device *sdev;
sdev = scmi_device_create(np, info->dev, prot_id);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
return;
}
if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
}
/* setup handle now as the transport is ready */
scmi_set_handle(sdev);
}
static int scmi_probe(struct platform_device *pdev)
{
int ret;
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
/* Only mailbox method supported, check for the presence of one */
if (scmi_mailbox_check(np)) {
dev_err(dev, "no mailbox found in %pOF\n", np);
return -EINVAL;
}
desc = of_match_device(scmi_of_match, dev)->data;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->desc = desc;
INIT_LIST_HEAD(&info->node);
ret = scmi_xfer_info_init(info);
if (ret)
return ret;
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
return ret;
}
mutex_lock(&scmi_list_mutex);
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
for_each_available_child_of_node(np, child) {
u32 prot_id;
if (of_property_read_u32(child, "reg", &prot_id))
continue;
prot_id &= MSG_PROTOCOL_ID_MASK;
if (!scmi_is_protocol_implemented(handle, prot_id)) {
dev_err(dev, "SCMI protocol %d not implemented\n",
prot_id);
continue;
}
scmi_create_protocol_device(child, info, prot_id);
}
return 0;
}
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
.of_match_table = scmi_of_match,
},
.probe = scmi_probe,
.remove = scmi_remove,
};
module_platform_driver(scmi_driver);
MODULE_ALIAS("platform: arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");

diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c

@@ -0,0 +1,481 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
* Copyright (C) 2018 ARM Ltd.
*/
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>
#include "common.h"
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
PERF_DESCRIBE_LEVELS = 0x4,
PERF_LIMITS_SET = 0x5,
PERF_LIMITS_GET = 0x6,
PERF_LEVEL_SET = 0x7,
PERF_LEVEL_GET = 0x8,
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
};
struct scmi_opp {
u32 perf;
u32 power;
u32 trans_latency_us;
};
struct scmi_msg_resp_perf_attributes {
__le16 num_domains;
__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
};
struct scmi_msg_resp_perf_domain_attributes {
__le32 flags;
#define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
u8 name[SCMI_MAX_STR_SIZE];
};
struct scmi_msg_perf_describe_levels {
__le32 domain;
__le32 level_index;
};
struct scmi_perf_set_limits {
__le32 domain;
__le32 max_level;
__le32 min_level;
};
struct scmi_perf_get_limits {
__le32 max_level;
__le32 min_level;
};
struct scmi_perf_set_level {
__le32 domain;
__le32 level;
};
struct scmi_perf_notify_level_or_limits {
__le32 domain;
__le32 notify_enable;
};
struct scmi_msg_resp_perf_describe_levels {
__le16 num_returned;
__le16 num_remaining;
struct {
__le32 perf_val;
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
} opp[0];
};
struct perf_dom_info {
bool set_limits;
bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_opp opp[MAX_OPPS];
};
struct scmi_perf_info {
int num_domains;
bool power_scale_mw;
u64 stats_addr;
u32 stats_size;
struct perf_dom_info *dom_info;
};
static int scmi_perf_attributes_get(const struct scmi_handle *handle,
struct scmi_perf_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
u16 flags = le16_to_cpu(attr->flags);
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
struct perf_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
ret = scmi_one_xfer_init(handle, PERF_DOMAIN_ATTRIBUTES,
SCMI_PROTOCOL_PERF, sizeof(domain),
sizeof(*attr), &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int opp_cmp_func(const void *opp1, const void *opp2)
{
const struct scmi_opp *t1 = opp1, *t2 = opp2;
return t1->perf - t2->perf;
}
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
struct perf_dom_info *perf_dom)
{
int ret, cnt;
u32 tot_opp_cnt = 0;
u16 num_returned, num_remaining;
struct scmi_xfer *t;
struct scmi_opp *opp;
struct scmi_msg_perf_describe_levels *dom_info;
struct scmi_msg_resp_perf_describe_levels *level_info;
ret = scmi_one_xfer_init(handle, PERF_DESCRIBE_LEVELS,
SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
if (ret)
return ret;
dom_info = t->tx.buf;
level_info = t->rx.buf;
do {
dom_info->domain = cpu_to_le32(domain);
/* Set the number of OPPs to be skipped/already read */
dom_info->level_index = cpu_to_le32(tot_opp_cnt);
ret = scmi_do_xfer(handle, t);
if (ret)
break;
num_returned = le16_to_cpu(level_info->num_returned);
num_remaining = le16_to_cpu(level_info->num_remaining);
if (tot_opp_cnt + num_returned > MAX_OPPS) {
dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
break;
}
opp = &perf_dom->opp[tot_opp_cnt];
for (cnt = 0; cnt < num_returned; cnt++, opp++) {
opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
opp->power = le32_to_cpu(level_info->opp[cnt].power);
opp->trans_latency_us = le16_to_cpu
(level_info->opp[cnt].transition_latency_us);
dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
opp->perf, opp->power, opp->trans_latency_us);
}
tot_opp_cnt += num_returned;
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
perf_dom->opp_count = tot_opp_cnt;
scmi_one_xfer_put(handle, t);
sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
}
static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_limits *limits;
ret = scmi_one_xfer_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
sizeof(*limits), 0, &t);
if (ret)
return ret;
limits = t->tx.buf;
limits->domain = cpu_to_le32(domain);
limits->max_level = cpu_to_le32(max_perf);
limits->min_level = cpu_to_le32(min_perf);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_get_limits *limits;
ret = scmi_one_xfer_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
sizeof(__le32), 0, &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
ret = scmi_do_xfer(handle, t);
if (!ret) {
limits = t->rx.buf;
*max_perf = le32_to_cpu(limits->max_level);
*min_perf = le32_to_cpu(limits->min_level);
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_level *lvl;
ret = scmi_one_xfer_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
sizeof(*lvl), 0, &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
lvl = t->tx.buf;
lvl->domain = cpu_to_le32(domain);
lvl->level = cpu_to_le32(level);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
ret = scmi_do_xfer(handle, t);
if (!ret)
*level = le32_to_cpu(*(__le32 *)t->rx.buf);
scmi_one_xfer_put(handle, t);
return ret;
}
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
struct of_phandle_args clkspec;
if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
0, &clkspec))
return -EINVAL;
return clkspec.args[0];
}
static int scmi_dvfs_add_opps_to_device(const struct scmi_handle *handle,
struct device *dev)
{
int idx, ret, domain;
unsigned long freq;
struct scmi_opp *opp;
struct perf_dom_info *dom;
struct scmi_perf_info *pi = handle->perf_priv;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return domain;
dom = pi->dom_info + domain;
if (!dom)
return -EIO;
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
freq = opp->perf * dom->mult_factor;
ret = dev_pm_opp_add(dev, freq, 0);
if (ret) {
dev_warn(dev, "failed to add opp %luHz\n", freq);
while (idx-- > 0) {
freq = (--opp)->perf * dom->mult_factor;
dev_pm_opp_remove(dev, freq);
}
return ret;
}
}
return 0;
}
static int scmi_dvfs_get_transition_latency(const struct scmi_handle *handle,
struct device *dev)
{
struct perf_dom_info *dom;
struct scmi_perf_info *pi = handle->perf_priv;
int domain = scmi_dev_domain_id(dev);
if (domain < 0)
return domain;
dom = pi->dom_info + domain;
if (!dom)
return -EIO;
/* uS to nS */
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
unsigned long freq, bool poll)
{
struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom = pi->dom_info + domain;
return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
poll);
}
static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
unsigned long *freq, bool poll)
{
int ret;
u32 level;
struct scmi_perf_info *pi = handle->perf_priv;
struct perf_dom_info *dom = pi->dom_info + domain;
ret = scmi_perf_level_get(handle, domain, &level, poll);
if (!ret)
*freq = level * dom->mult_factor;
return ret;
}
static struct scmi_perf_ops perf_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
.level_get = scmi_perf_level_get,
.device_domain_id = scmi_dev_domain_id,
.get_transition_latency = scmi_dvfs_get_transition_latency,
.add_opps_to_device = scmi_dvfs_add_opps_to_device,
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
};
static int scmi_perf_protocol_init(struct scmi_handle *handle)
{
int domain;
u32 version;
struct scmi_perf_info *pinfo;
scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
dev_dbg(handle->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
scmi_perf_attributes_get(handle, pinfo);
pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
scmi_perf_domain_attributes_get(handle, domain, dom);
scmi_perf_describe_levels_get(handle, domain, dom);
}
handle->perf_ops = &perf_ops;
handle->perf_priv = pinfo;
return 0;
}
static int __init scmi_perf_init(void)
{
return scmi_protocol_register(SCMI_PROTOCOL_PERF,
&scmi_perf_protocol_init);
}
subsys_initcall(scmi_perf_init);
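Illustrative sketch only (not part of this patch): how a consumer holding an scmi_handle, for example a cpufreq driver, might exercise the perf_ops registered above. The device pointer and error handling are assumptions made for the example.

/* Hypothetical consumer of the perf_ops table registered above. */
static int example_set_freq(const struct scmi_handle *handle,
			    struct device *dev, unsigned long target)
{
	unsigned long freq;
	int domain, ret;

	domain = handle->perf_ops->device_domain_id(dev);
	if (domain < 0)
		return domain;

	/* poll=false: wait for the firmware reply via completion */
	ret = handle->perf_ops->freq_set(handle, domain, target, false);
	if (ret)
		return ret;

	ret = handle->perf_ops->freq_get(handle, domain, &freq, false);
	if (!ret)
		dev_dbg(dev, "now running at %lu Hz\n", freq);
	return ret;
}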


@@ -0,0 +1,221 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Power Protocol
*
* Copyright (C) 2018 ARM Ltd.
*/
#include "common.h"
enum scmi_power_protocol_cmd {
POWER_DOMAIN_ATTRIBUTES = 0x3,
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
};
struct scmi_msg_resp_power_attributes {
__le16 num_domains;
__le16 reserved;
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
};
struct scmi_msg_resp_power_domain_attributes {
__le32 flags;
#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31))
#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30))
#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29))
u8 name[SCMI_MAX_STR_SIZE];
};
struct scmi_power_set_state {
__le32 flags;
#define STATE_SET_ASYNC BIT(0)
__le32 domain;
__le32 state;
};
struct scmi_power_state_notify {
__le32 domain;
__le32 notify_enable;
};
struct power_dom_info {
bool state_set_sync;
bool state_set_async;
bool state_set_notify;
char name[SCMI_MAX_STR_SIZE];
};
struct scmi_power_info {
int num_domains;
u64 stats_addr;
u32 stats_size;
struct power_dom_info *dom_info;
};
static int scmi_power_attributes_get(const struct scmi_handle *handle,
struct scmi_power_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
struct power_dom_info *dom_info)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
ret = scmi_one_xfer_init(handle, POWER_DOMAIN_ATTRIBUTES,
SCMI_PROTOCOL_POWER, sizeof(domain),
sizeof(*attr), &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_power_state_set(const struct scmi_handle *handle, u32 domain, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_set_state *st;
ret = scmi_one_xfer_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER,
sizeof(*st), 0, &t);
if (ret)
return ret;
st = t->tx.buf;
st->flags = cpu_to_le32(0);
st->domain = cpu_to_le32(domain);
st->state = cpu_to_le32(state);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
{
int ret;
struct scmi_xfer *t;
ret = scmi_one_xfer_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER,
sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(domain);
ret = scmi_do_xfer(handle, t);
if (!ret)
*state = le32_to_cpu(*(__le32 *)t->rx.buf);
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_power_num_domains_get(const struct scmi_handle *handle)
{
struct scmi_power_info *pi = handle->power_priv;
return pi->num_domains;
}
static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
{
struct scmi_power_info *pi = handle->power_priv;
struct power_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
static struct scmi_power_ops power_ops = {
.num_domains_get = scmi_power_num_domains_get,
.name_get = scmi_power_name_get,
.state_set = scmi_power_state_set,
.state_get = scmi_power_state_get,
};
static int scmi_power_protocol_init(struct scmi_handle *handle)
{
int domain;
u32 version;
struct scmi_power_info *pinfo;
scmi_version_get(handle, SCMI_PROTOCOL_POWER, &version);
dev_dbg(handle->dev, "Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
scmi_power_attributes_get(handle, pinfo);
pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
scmi_power_domain_attributes_get(handle, domain, dom);
}
handle->power_ops = &power_ops;
handle->power_priv = pinfo;
return 0;
}
static int __init scmi_power_init(void)
{
return scmi_protocol_register(SCMI_PROTOCOL_POWER,
&scmi_power_protocol_init);
}
subsys_initcall(scmi_power_init);
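A minimal sketch, again hypothetical, of driving the power_ops above from a client: set a state and read it back to confirm the transition, mirroring what the generic power domain glue further below does. The domain index 0 is an assumption for the example.

/* Hypothetical sketch: power a domain off through power_ops. */
static int example_power_off(const struct scmi_handle *handle)
{
	u32 state;
	int ret;

	ret = handle->power_ops->state_set(handle, 0,
					   SCMI_POWER_STATE_GENERIC_OFF);
	if (ret)
		return ret;

	ret = handle->power_ops->state_get(handle, 0, &state);
	if (!ret && state != SCMI_POWER_STATE_GENERIC_OFF)
		ret = -EIO;
	return ret;
}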


@@ -0,0 +1,129 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SCMI Generic power domain support.
*
* Copyright (C) 2018 ARM Ltd.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
struct scmi_pm_domain {
struct generic_pm_domain genpd;
const struct scmi_handle *handle;
const char *name;
u32 domain;
};
#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)
static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
{
int ret;
u32 state, ret_state;
struct scmi_pm_domain *pd = to_scmi_pd(domain);
const struct scmi_power_ops *ops = pd->handle->power_ops;
if (power_on)
state = SCMI_POWER_STATE_GENERIC_ON;
else
state = SCMI_POWER_STATE_GENERIC_OFF;
ret = ops->state_set(pd->handle, pd->domain, state);
if (!ret)
ret = ops->state_get(pd->handle, pd->domain, &ret_state);
if (!ret && state != ret_state)
return -EIO;
return ret;
}
static int scmi_pd_power_on(struct generic_pm_domain *domain)
{
return scmi_pd_power(domain, true);
}
static int scmi_pd_power_off(struct generic_pm_domain *domain)
{
return scmi_pd_power(domain, false);
}
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
struct scmi_pm_domain *scmi_pd;
struct genpd_onecell_data *scmi_pd_data;
struct generic_pm_domain **domains;
const struct scmi_handle *handle = sdev->handle;
if (!handle || !handle->power_ops)
return -ENODEV;
num_domains = handle->power_ops->num_domains_get(handle);
if (num_domains < 0) {
dev_err(dev, "number of domains not found\n");
return num_domains;
}
scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
if (!scmi_pd)
return -ENOMEM;
scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
if (!scmi_pd_data)
return -ENOMEM;
domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
if (!domains)
return -ENOMEM;
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
domains[i] = &scmi_pd->genpd;
scmi_pd->domain = i;
scmi_pd->handle = handle;
scmi_pd->name = handle->power_ops->name_get(handle, i);
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
if (handle->power_ops->state_get(handle, i, &state)) {
dev_warn(dev, "failed to get state for domain %d\n", i);
continue;
}
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
}
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
of_genpd_add_provider_onecell(np, scmi_pd_data);
return 0;
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI power domain driver");
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,291 @@
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
* Copyright (C) 2018 ARM Ltd.
*/
#include "common.h"
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
SENSOR_CONFIG_SET = 0x4,
SENSOR_TRIP_POINT_SET = 0x5,
SENSOR_READING_GET = 0x6,
};
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
u8 reserved;
__le32 reg_addr_low;
__le32 reg_addr_high;
__le32 reg_size;
};
struct scmi_msg_resp_sensor_description {
__le16 num_returned;
__le16 num_remaining;
struct {
__le32 id;
__le32 attributes_low;
#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
#define NUM_TRIP_POINTS(x) (((x) >> 4) & 0xff)
__le32 attributes_high;
#define SENSOR_TYPE(x) ((x) & 0xff)
#define SENSOR_SCALE(x) (((x) >> 11) & 0x3f)
#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
u8 name[SCMI_MAX_STR_SIZE];
} desc[0];
};
struct scmi_msg_set_sensor_config {
__le32 id;
__le32 event_control;
};
struct scmi_msg_set_sensor_trip_point {
__le32 id;
__le32 event_control;
#define SENSOR_TP_EVENT_MASK (0x3)
#define SENSOR_TP_DISABLED 0x0
#define SENSOR_TP_POSITIVE 0x1
#define SENSOR_TP_NEGATIVE 0x2
#define SENSOR_TP_BOTH 0x3
#define SENSOR_TP_ID(x) (((x) & 0xff) << 4)
__le32 value_low;
__le32 value_high;
};
struct scmi_msg_sensor_reading_get {
__le32 id;
__le32 flags;
#define SENSOR_READ_ASYNC BIT(0)
};
struct sensors_info {
int num_sensors;
int max_requests;
u64 reg_addr;
u32 reg_size;
struct scmi_sensor_info *sensors;
};
static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
struct sensors_info *si)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_attributes *attr;
ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES,
SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
if (!ret) {
si->num_sensors = le16_to_cpu(attr->num_sensors);
si->max_requests = attr->max_requests;
si->reg_addr = le32_to_cpu(attr->reg_addr_low) |
(u64)le32_to_cpu(attr->reg_addr_high) << 32;
si->reg_size = le32_to_cpu(attr->reg_size);
}
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct sensors_info *si)
{
int ret, cnt;
u32 desc_index = 0;
u16 num_returned, num_remaining;
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_description *buf;
ret = scmi_one_xfer_init(handle, SENSOR_DESCRIPTION_GET,
SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t);
if (ret)
return ret;
buf = t->rx.buf;
do {
/* Set the number of sensors to be skipped/already read */
*(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
ret = scmi_do_xfer(handle, t);
if (ret)
break;
num_returned = le16_to_cpu(buf->num_returned);
num_remaining = le16_to_cpu(buf->num_remaining);
if (desc_index + num_returned > si->num_sensors) {
dev_err(handle->dev, "No. of sensors can't exceed %d",
si->num_sensors);
break;
}
for (cnt = 0; cnt < num_returned; cnt++) {
u32 attrh;
struct scmi_sensor_info *s;
attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
s->id = le32_to_cpu(buf->desc[cnt].id);
s->type = SENSOR_TYPE(attrh);
memcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
}
desc_index += num_returned;
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
scmi_one_xfer_put(handle, t);
return ret;
}
static int
scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
{
int ret;
u32 evt_cntl = BIT(0);
struct scmi_xfer *t;
struct scmi_msg_set_sensor_config *cfg;
ret = scmi_one_xfer_init(handle, SENSOR_CONFIG_SET,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(sensor_id);
cfg->event_control = cpu_to_le32(evt_cntl);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
u32 sensor_id, u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
ret = scmi_one_xfer_init(handle, SENSOR_TRIP_POINT_SET,
SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
if (ret)
return ret;
trip = t->tx.buf;
trip->id = cpu_to_le32(sensor_id);
trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id));
trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
trip->value_high = cpu_to_le32(trip_value >> 32);
ret = scmi_do_xfer(handle, t);
scmi_one_xfer_put(handle, t);
return ret;
}
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
u32 sensor_id, bool async, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
ret = scmi_one_xfer_init(handle, SENSOR_READING_GET,
SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
sizeof(u64), &t);
if (ret)
return ret;
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
ret = scmi_do_xfer(handle, t);
if (!ret) {
__le32 *pval = t->rx.buf;
*value = le32_to_cpu(*pval);
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
}
scmi_one_xfer_put(handle, t);
return ret;
}
static const struct scmi_sensor_info *
scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
{
struct sensors_info *si = handle->sensor_priv;
return si->sensors + sensor_id;
}
static int scmi_sensor_count_get(const struct scmi_handle *handle)
{
struct sensors_info *si = handle->sensor_priv;
return si->num_sensors;
}
static struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
.configuration_set = scmi_sensor_configuration_set,
.trip_point_set = scmi_sensor_trip_point_set,
.reading_get = scmi_sensor_reading_get,
};
static int scmi_sensors_protocol_init(struct scmi_handle *handle)
{
u32 version;
struct sensors_info *sinfo;
scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
dev_dbg(handle->dev, "Sensor Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
scmi_sensor_attributes_get(handle, sinfo);
sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
scmi_sensor_description_get(handle, sinfo);
handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
return 0;
}
static int __init scmi_sensors_init(void)
{
return scmi_protocol_register(SCMI_PROTOCOL_SENSOR,
&scmi_sensors_protocol_init);
}
subsys_initcall(scmi_sensors_init);
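For orientation, a hypothetical sketch of how an hwmon-style user might walk the discovered sensors through the sensor_ops above and take synchronous readings; the iteration and logging are illustrative assumptions.

/* Hypothetical sketch: dump one synchronous reading per sensor. */
static void example_dump_sensors(const struct scmi_handle *handle)
{
	const struct scmi_sensor_info *s;
	int i, num = handle->sensor_ops->count_get(handle);
	u64 value;

	for (i = 0; i < num; i++) {
		s = handle->sensor_ops->info_get(handle, i);
		if (!handle->sensor_ops->reading_get(handle, s->id, false,
						     &value))
			dev_dbg(handle->dev, "%s: %llu\n", s->name,
				(unsigned long long)value);
	}
}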


@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -45,48 +46,32 @@
#include <linux/sort.h>
#include <linux/spinlock.h>
#define CMD_ID_SHIFT 0
#define CMD_ID_MASK 0x7f
#define CMD_TOKEN_ID_SHIFT 8
#define CMD_TOKEN_ID_MASK 0xff
#define CMD_DATA_SIZE_SHIFT 16
#define CMD_DATA_SIZE_MASK 0x1ff
#define CMD_LEGACY_DATA_SIZE_SHIFT 20
#define CMD_LEGACY_DATA_SIZE_MASK 0x1ff
#define PACK_SCPI_CMD(cmd_id, tx_sz) \
((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
#define ADD_SCPI_TOKEN(cmd, token) \
((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))
#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
(((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT))
#define CMD_ID_MASK GENMASK(6, 0)
#define CMD_TOKEN_ID_MASK GENMASK(15, 8)
#define CMD_DATA_SIZE_MASK GENMASK(24, 16)
#define CMD_LEGACY_DATA_SIZE_MASK GENMASK(28, 20)
#define PACK_SCPI_CMD(cmd_id, tx_sz) \
(FIELD_PREP(CMD_ID_MASK, cmd_id) | \
FIELD_PREP(CMD_DATA_SIZE_MASK, tx_sz))
#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
(FIELD_PREP(CMD_ID_MASK, cmd_id) | \
FIELD_PREP(CMD_LEGACY_DATA_SIZE_MASK, tx_sz))
#define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
#define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \
CMD_LEGACY_DATA_SIZE_MASK)
#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
#define CMD_SIZE(cmd) FIELD_GET(CMD_DATA_SIZE_MASK, cmd)
#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
#define SCPI_SLOT 0
#define MAX_DVFS_DOMAINS 8
#define MAX_DVFS_OPPS 16
#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
#define PROTOCOL_REV_MINOR_BITS 16
#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS)
#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK)
#define PROTO_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTO_REV_MINOR_MASK GENMASK(15, 0)
#define FW_REV_MAJOR_BITS 24
#define FW_REV_MINOR_BITS 16
#define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1)
#define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1)
#define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS)
#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK)
#define FW_REV_MAJOR_MASK GENMASK(31, 24)
#define FW_REV_MINOR_MASK GENMASK(23, 16)
#define FW_REV_PATCH_MASK GENMASK(15, 0)
#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
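For reference, the pattern adopted in the rewrite above: GENMASK() declares a contiguous bit range, and FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> derive the shift from the mask at compile time, so the separate *_SHIFT constants can go away. A small stand-alone example with an arbitrary, assumed register layout:

#include <linux/bitfield.h>

#define EXAMPLE_ID_MASK		GENMASK(6, 0)	/* bits 6..0 */
#define EXAMPLE_TOKEN_MASK	GENMASK(15, 8)	/* bits 15..8 */

static u32 example_pack(u32 id, u32 token)
{
	/* FIELD_PREP() shifts each value into its mask's position */
	return FIELD_PREP(EXAMPLE_ID_MASK, id) |
	       FIELD_PREP(EXAMPLE_TOKEN_MASK, token);
}

static u32 example_token(u32 cmd)
{
	/* FIELD_GET() extracts the field and right-aligns it */
	return FIELD_GET(EXAMPLE_TOKEN_MASK, cmd);
}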
@@ -311,10 +296,6 @@ struct clk_get_info {
u8 name[20];
} __packed;
struct clk_get_value {
__le32 rate;
} __packed;
struct clk_set_value {
__le16 id;
__le16 reserved;
@@ -328,7 +309,9 @@ struct legacy_clk_set_value {
} __packed;
struct dvfs_info {
__le32 header;
u8 domain;
u8 opp_count;
__le16 latency;
struct {
__le32 freq;
__le32 m_volt;
@@ -340,10 +323,6 @@ struct dvfs_set {
u8 index;
} __packed;
struct sensor_capabilities {
__le16 sensors;
} __packed;
struct _scpi_sensor_info {
__le16 sensor_id;
u8 class;
@@ -351,11 +330,6 @@ struct _scpi_sensor_info {
char name[20];
};
struct sensor_value {
__le32 lo_val;
__le32 hi_val;
} __packed;
struct dev_pstate_set {
__le16 dev_id;
u8 pstate;
@@ -419,19 +393,20 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
unsigned int len;
if (scpi_info->is_legacy) {
struct legacy_scpi_shared_mem *mem = ch->rx_payload;
struct legacy_scpi_shared_mem __iomem *mem =
ch->rx_payload;
/* RX Length is not replied by the legacy Firmware */
len = match->rx_len;
match->status = le32_to_cpu(mem->status);
match->status = ioread32(&mem->status);
memcpy_fromio(match->rx_buf, mem->payload, len);
} else {
struct scpi_shared_mem *mem = ch->rx_payload;
struct scpi_shared_mem __iomem *mem = ch->rx_payload;
len = min(match->rx_len, CMD_SIZE(cmd));
len = min_t(unsigned int, match->rx_len, CMD_SIZE(cmd));
match->status = le32_to_cpu(mem->status);
match->status = ioread32(&mem->status);
memcpy_fromio(match->rx_buf, mem->payload, len);
}
@@ -445,11 +420,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = ch->rx_payload;
struct scpi_shared_mem __iomem *mem = ch->rx_payload;
u32 cmd = 0;
if (!scpi_info->is_legacy)
cmd = le32_to_cpu(mem->command);
cmd = ioread32(&mem->command);
scpi_process_cmd(ch, cmd);
}
@@ -459,7 +434,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
unsigned long flags;
struct scpi_xfer *t = msg;
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
struct scpi_shared_mem __iomem *mem = ch->tx_payload;
if (t->tx_buf) {
if (scpi_info->is_legacy)
@@ -471,14 +446,14 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
if (t->rx_buf) {
if (!(++ch->token))
++ch->token;
ADD_SCPI_TOKEN(t->cmd, ch->token);
t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token);
spin_lock_irqsave(&ch->rx_lock, flags);
list_add_tail(&t->node, &ch->rx_pending);
spin_unlock_irqrestore(&ch->rx_lock, flags);
}
if (!scpi_info->is_legacy)
mem->command = cpu_to_le32(t->cmd);
iowrite32(t->cmd, &mem->command);
}
static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -583,13 +558,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
static unsigned long scpi_clk_get_val(u16 clk_id)
{
int ret;
struct clk_get_value clk;
__le32 rate;
__le16 le_clk_id = cpu_to_le16(clk_id);
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
sizeof(le_clk_id), &rate, sizeof(rate));
return ret ? ret : le32_to_cpu(clk.rate);
return ret ? ret : le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
@@ -665,8 +640,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (!info)
return ERR_PTR(-ENOMEM);
info->count = DVFS_OPP_COUNT(buf.header);
info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */
info->count = buf.opp_count;
info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
if (!info->opps) {
@@ -713,9 +688,6 @@ static int scpi_dvfs_get_transition_latency(struct device *dev)
if (IS_ERR(info))
return PTR_ERR(info);
if (!info->latency)
return 0;
return info->latency;
}
@@ -746,13 +718,13 @@ static int scpi_dvfs_add_opps_to_device(struct device *dev)
static int scpi_sensor_get_capability(u16 *sensors)
{
struct sensor_capabilities cap_buf;
__le16 cap;
int ret;
ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
sizeof(cap_buf));
ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap,
sizeof(cap));
if (!ret)
*sensors = le16_to_cpu(cap_buf.sensors);
*sensors = le16_to_cpu(cap);
return ret;
}
@@ -776,20 +748,19 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
__le16 id = cpu_to_le16(sensor);
struct sensor_value buf;
__le64 value;
int ret;
ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
&value, sizeof(value));
if (ret)
return ret;
if (scpi_info->is_legacy)
/* only 32-bits supported, hi_val can be junk */
*val = le32_to_cpu(buf.lo_val);
/* only 32-bits supported, upper 32 bits can be junk */
*val = le32_to_cpup((__le32 *)&value);
else
*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
le32_to_cpu(buf.lo_val);
*val = le64_to_cpu(value);
return 0;
}
@@ -864,9 +835,9 @@ static ssize_t protocol_version_show(struct device *dev,
{
struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
return sprintf(buf, "%d.%d\n",
PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
PROTOCOL_REV_MINOR(scpi_info->protocol_version));
return sprintf(buf, "%lu.%lu\n",
FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
}
static DEVICE_ATTR_RO(protocol_version);
@@ -875,10 +846,10 @@ static ssize_t firmware_version_show(struct device *dev,
{
struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
return sprintf(buf, "%d.%d.%d\n",
FW_REV_MAJOR(scpi_info->firmware_version),
FW_REV_MINOR(scpi_info->firmware_version),
FW_REV_PATCH(scpi_info->firmware_version));
return sprintf(buf, "%lu.%lu.%lu\n",
FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
}
static DEVICE_ATTR_RO(firmware_version);
@@ -889,37 +860,26 @@ static struct attribute *versions_attrs[] = {
};
ATTRIBUTE_GROUPS(versions);
static void
scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
static void scpi_free_channels(void *data)
{
struct scpi_drvinfo *info = data;
int i;
for (i = 0; i < count && pchan->chan; i++, pchan++) {
mbox_free_channel(pchan->chan);
devm_kfree(dev, pchan->xfers);
devm_iounmap(dev, pchan->rx_payload);
}
for (i = 0; i < info->num_chans; i++)
mbox_free_channel(info->channels[i].chan);
}
static int scpi_remove(struct platform_device *pdev)
{
int i;
struct device *dev = &pdev->dev;
struct scpi_drvinfo *info = platform_get_drvdata(pdev);
scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
of_platform_depopulate(dev);
sysfs_remove_groups(&dev->kobj, versions_groups);
scpi_free_channels(dev, info->channels, info->num_chans);
platform_set_drvdata(pdev, NULL);
for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
kfree(info->dvfs[i]->opps);
kfree(info->dvfs[i]);
}
devm_kfree(dev, info->channels);
devm_kfree(dev, info);
return 0;
}
@@ -952,7 +912,6 @@ static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
struct resource res;
struct scpi_chan *scpi_chan;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -969,13 +928,19 @@ static int scpi_probe(struct platform_device *pdev)
return -ENODEV;
}
scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
if (!scpi_chan)
scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
GFP_KERNEL);
if (!scpi_info->channels)
return -ENOMEM;
for (idx = 0; idx < count; idx++) {
ret = devm_add_action(dev, scpi_free_channels, scpi_info);
if (ret)
return ret;
for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
resource_size_t size;
struct scpi_chan *pchan = scpi_chan + idx;
int idx = scpi_info->num_chans;
struct scpi_chan *pchan = scpi_info->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -983,15 +948,14 @@ static int scpi_probe(struct platform_device *pdev)
of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
goto err;
return ret;
}
size = resource_size(&res);
pchan->rx_payload = devm_ioremap(dev, res.start, size);
if (!pchan->rx_payload) {
dev_err(dev, "failed to ioremap SCPI payload\n");
ret = -EADDRNOTAVAIL;
goto err;
return -EADDRNOTAVAIL;
}
pchan->tx_payload = pchan->rx_payload + (size >> 1);
@@ -1017,14 +981,9 @@ static int scpi_probe(struct platform_device *pdev)
dev_err(dev, "failed to get channel%d err %d\n",
idx, ret);
}
err:
scpi_free_channels(dev, scpi_chan, idx);
scpi_info = NULL;
return ret;
}
scpi_info->channels = scpi_chan;
scpi_info->num_chans = count;
scpi_info->commands = scpi_std_commands;
platform_set_drvdata(pdev, scpi_info);
@@ -1043,23 +1002,31 @@ err:
ret = scpi_init_versions(scpi_info);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
scpi_remove(pdev);
return ret;
}
_dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
PROTOCOL_REV_MINOR(scpi_info->protocol_version),
FW_REV_MAJOR(scpi_info->firmware_version),
FW_REV_MINOR(scpi_info->firmware_version),
FW_REV_PATCH(scpi_info->firmware_version));
if (scpi_info->is_legacy && !scpi_info->protocol_version &&
!scpi_info->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
scpi_info->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
scpi_info->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
scpi_info->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
scpi_info->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
scpi_info->firmware_version));
scpi_info->scpi_ops = &scpi_ops;
ret = sysfs_create_groups(&dev->kobj, versions_groups);
ret = devm_device_add_groups(dev, versions_groups);
if (ret)
dev_err(dev, "unable to create sysfs version group\n");
return of_platform_populate(dev->of_node, NULL, NULL, dev);
return devm_of_platform_populate(dev);
}
static const struct of_device_id scpi_of_match[] = {


@@ -17,8 +17,10 @@
#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/sizes.h>
@@ -217,21 +219,11 @@ static const struct of_device_id meson_sm_ids[] = {
{ /* sentinel */ },
};
int __init meson_sm_init(void)
static int __init meson_sm_probe(struct platform_device *pdev)
{
const struct meson_sm_chip *chip;
const struct of_device_id *matched_np;
struct device_node *np;
np = of_find_matching_node_and_match(NULL, meson_sm_ids, &matched_np);
if (!np)
return -ENODEV;
chip = matched_np->data;
if (!chip) {
pr_err("unable to setup secure-monitor data\n");
goto out;
}
chip = of_match_device(meson_sm_ids, &pdev->dev)->data;
if (chip->cmd_shmem_in_base) {
fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
@@ -257,4 +249,11 @@ out_in_base:
out:
return -EINVAL;
}
device_initcall(meson_sm_init);
static struct platform_driver meson_sm_driver = {
.driver = {
.name = "meson-sm",
.of_match_table = of_match_ptr(meson_sm_ids),
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
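The driver above registers with module_platform_driver_probe(), the helper for drivers whose probe routine lives in __init memory; it expands to a one-shot platform_driver_probe() call that binds immediately and never defers. Roughly equivalent to this open-coded sketch (the init function name is invented for illustration):

static int __init example_sm_init(void)
{
	/* Binds at most once, right now; meson_sm_probe() can therefore
	 * stay __init and be discarded after boot. */
	return platform_driver_probe(&meson_sm_driver, meson_sm_probe);
}
module_init(example_sm_init);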


@@ -70,57 +70,20 @@ void tegra_bpmp_put(struct tegra_bpmp *bpmp)
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);
static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
{
return channel - channel->bpmp->channels;
}
static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
struct tegra_bpmp *bpmp = channel->bpmp;
unsigned int offset, count;
unsigned int count;
int index;
offset = bpmp->soc->channels.thread.offset;
count = bpmp->soc->channels.thread.count;
index = tegra_bpmp_channel_get_index(channel);
if (index < 0)
return index;
if (index < offset || index >= offset + count)
index = channel - channel->bpmp->threaded_channels;
if (index < 0 || index >= count)
return -EINVAL;
return index - offset;
}
static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
{
unsigned int offset = bpmp->soc->channels.thread.offset;
unsigned int count = bpmp->soc->channels.thread.count;
if (index >= count)
return NULL;
return &bpmp->channels[offset + index];
}
static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
{
unsigned int offset = bpmp->soc->channels.cpu_tx.offset;
return &bpmp->channels[offset + smp_processor_id()];
}
static struct tegra_bpmp_channel *
tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
{
unsigned int offset = bpmp->soc->channels.cpu_rx.offset;
return &bpmp->channels[offset];
return index;
}
static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
@@ -271,11 +234,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
goto unlock;
}
channel = tegra_bpmp_channel_get_thread(bpmp, index);
if (!channel) {
err = -EINVAL;
goto unlock;
}
channel = &bpmp->threaded_channels[index];
if (!tegra_bpmp_master_free(channel)) {
err = -EBUSY;
@@ -328,12 +287,18 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
if (!tegra_bpmp_message_valid(msg))
return -EINVAL;
channel = tegra_bpmp_channel_get_tx(bpmp);
channel = bpmp->tx_channel;
spin_lock(&bpmp->atomic_tx_lock);
err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
msg->tx.data, msg->tx.size);
if (err < 0)
if (err < 0) {
spin_unlock(&bpmp->atomic_tx_lock);
return err;
}
spin_unlock(&bpmp->atomic_tx_lock);
err = mbox_send_message(bpmp->mbox.channel, NULL);
if (err < 0)
@@ -607,7 +572,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
unsigned int i, count;
unsigned long *busy;
channel = tegra_bpmp_channel_get_rx(bpmp);
channel = bpmp->rx_channel;
count = bpmp->soc->channels.thread.count;
busy = bpmp->threaded.busy;
@@ -619,9 +584,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
for_each_set_bit(i, busy, count) {
struct tegra_bpmp_channel *channel;
channel = tegra_bpmp_channel_get_thread(bpmp, i);
if (!channel)
continue;
channel = &bpmp->threaded_channels[i];
if (tegra_bpmp_master_acked(channel)) {
tegra_bpmp_channel_signal(channel);
@@ -698,7 +661,6 @@ static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
static int tegra_bpmp_probe(struct platform_device *pdev)
{
struct tegra_bpmp_channel *channel;
struct tegra_bpmp *bpmp;
unsigned int i;
char tag[32];
@@ -732,7 +694,7 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
}
bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
if (!bpmp->rx.pool) {
if (!bpmp->rx.virt) {
dev_err(&pdev->dev, "failed to allocate from RX pool\n");
err = -ENOMEM;
goto free_tx;
@@ -758,24 +720,45 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
goto free_rx;
}
bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
bpmp->soc->channels.thread.count +
bpmp->soc->channels.cpu_rx.count;
bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
sizeof(*channel), GFP_KERNEL);
if (!bpmp->channels) {
spin_lock_init(&bpmp->atomic_tx_lock);
bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
GFP_KERNEL);
if (!bpmp->tx_channel) {
err = -ENOMEM;
goto free_rx;
}
/* message channel initialization */
for (i = 0; i < bpmp->num_channels; i++) {
struct tegra_bpmp_channel *channel = &bpmp->channels[i];
bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
GFP_KERNEL);
if (!bpmp->rx_channel) {
err = -ENOMEM;
goto free_rx;
}
err = tegra_bpmp_channel_init(channel, bpmp, i);
bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
sizeof(*bpmp->threaded_channels),
GFP_KERNEL);
if (!bpmp->threaded_channels) {
err = -ENOMEM;
goto free_rx;
}
err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
bpmp->soc->channels.cpu_tx.offset);
if (err < 0)
goto free_rx;
err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
bpmp->soc->channels.cpu_rx.offset);
if (err < 0)
goto cleanup_tx_channel;
for (i = 0; i < bpmp->threaded.count; i++) {
err = tegra_bpmp_channel_init(
&bpmp->threaded_channels[i], bpmp,
bpmp->soc->channels.thread.offset + i);
if (err < 0)
goto cleanup_channels;
goto cleanup_threaded_channels;
}
/* mbox registration */
@@ -788,15 +771,14 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (IS_ERR(bpmp->mbox.channel)) {
err = PTR_ERR(bpmp->mbox.channel);
dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
goto cleanup_channels;
goto cleanup_threaded_channels;
}
/* reset message channels */
for (i = 0; i < bpmp->num_channels; i++) {
struct tegra_bpmp_channel *channel = &bpmp->channels[i];
tegra_bpmp_channel_reset(channel);
}
tegra_bpmp_channel_reset(bpmp->tx_channel);
tegra_bpmp_channel_reset(bpmp->rx_channel);
for (i = 0; i < bpmp->threaded.count; i++)
tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
tegra_bpmp_mrq_handle_ping, bpmp);
@@ -845,9 +827,15 @@ free_mrq:
tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
free_mbox:
mbox_free_channel(bpmp->mbox.channel);
cleanup_channels:
while (i--)
tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
cleanup_threaded_channels:
for (i = 0; i < bpmp->threaded.count; i++) {
if (bpmp->threaded_channels[i].bpmp)
tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
}
tegra_bpmp_channel_cleanup(bpmp->rx_channel);
cleanup_tx_channel:
tegra_bpmp_channel_cleanup(bpmp->tx_channel);
free_rx:
gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
free_tx:
@@ -858,18 +846,16 @@ free_tx:
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
.offset = 0,
.count = 6,
.offset = 3,
.timeout = 60 * USEC_PER_SEC,
},
.thread = {
.offset = 6,
.count = 7,
.offset = 0,
.count = 3,
.timeout = 600 * USEC_PER_SEC,
},
.cpu_rx = {
.offset = 13,
.count = 1,
.timeout = 0,
},
},