Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "This contains driver changes that are tightly connected to SoC
  specific code. Aside from smaller cleanups and bug fixes, here is a
  list of the notable changes.

  New device drivers:

   - The Turris Mox router has a new "moxtet" bus driver for its
     on-board pluggable extension bus. The same platform also gains a
     firmware driver.

   - The Samsung Exynos family gains a new Chipid driver, exporting
     SoC identification information using the soc device sysfs
     interface.

   - A similar socinfo driver for Qualcomm Snapdragon chips.

   - A firmware driver for the NXP i.MX DSP IPC protocol using shared
     memory and a mailbox.

  Other changes:

   - The i.MX reset controller driver now supports the NXP i.MX8MM
     chip.

   - Amlogic SoC specific drivers gain support for the S905X3 and
     A311D chips.

   - A rework of the TI Davinci framebuffer driver to allow important
     cleanups in the platform code.

   - A couple of device drivers for removed ARM SoC platforms are
     removed. Most of the removals were picked up by other
     maintainers; this contains whatever was left."

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (123 commits)
  bus: uniphier-system-bus: use devm_platform_ioremap_resource()
  soc: ti: ti_sci_pm_domains: Add support for exclusive and shared access
  dt-bindings: ti_sci_pm_domains: Add support for exclusive and shared access
  firmware: ti_sci: Allow for device shared and exclusive requests
  bus: imx-weim: remove incorrect __init annotations
  fbdev: remove w90x900/nuc900 platform drivers
  spi: remove w90x900 driver
  net: remove w90p910-ether driver
  net: remove ks8695 driver
  firmware: turris-mox-rwtm: Add sysfs documentation
  firmware: Add Turris Mox rWTM firmware driver
  dt-bindings: firmware: Document cznic,turris-mox-rwtm binding
  bus: moxtet: fix unsigned comparison to less than zero
  bus: moxtet: remove set but not used variable 'dummy'
  ARM: scoop: Use the right include
  dt-bindings: power: add Amlogic Everything-Else power domains bindings
  soc: amlogic: Add support for Everything-Else power domains controller
  fbdev: da8xx: use resource management for dma
  fbdev: da8xx-fb: drop a redundant if
  fbdev: da8xx-fb: use devm_platform_ioremap_resource()
  ...
drivers/firmware/Kconfig:

@@ -271,6 +271,20 @@ config TRUSTED_FOUNDATIONS
 	  Choose N if you don't know what this is about.

+config TURRIS_MOX_RWTM
+	tristate "Turris Mox rWTM secure firmware driver"
+	depends on ARCH_MVEBU || COMPILE_TEST
+	depends on HAS_DMA && OF
+	depends on MAILBOX
+	select HW_RANDOM
+	select ARMADA_37XX_RWTM_MBOX
+	help
+	  This driver communicates with the firmware on the Cortex-M3 secure
+	  processor of the Turris Mox router. Enable if you are building for
+	  Turris Mox, and you will be able to read the device serial number and
+	  other manufacturing data and also utilize the Entropy Bit Generator
+	  for hardware random number generation.
+
 config HAVE_ARM_SMCCC
 	bool
drivers/firmware/Makefile:

@@ -22,6 +22,7 @@ obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
 CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
 obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+obj-$(CONFIG_TURRIS_MOX_RWTM)	+= turris-mox-rwtm.o

 obj-$(CONFIG_ARM_SCMI_PROTOCOL)	+= arm_scmi/
 obj-y				+= psci/
drivers/firmware/arm_scmi/Makefile:

@@ -2,5 +2,5 @@
 obj-y	= scmi-bus.o scmi-driver.o scmi-protocols.o
 scmi-bus-y = bus.o
 scmi-driver-y = driver.o
-scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
 obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
drivers/firmware/arm_scmi/base.c:

@@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(id);
+	put_unaligned_le32(id, t->tx.buf);

 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
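
A note on the accessor change above: the old code cast t->tx.buf to __le32 and
dereferenced it, which assumes the buffer is suitably aligned, while
put_unaligned_le32() performs the same little-endian store without that
assumption. A minimal user-space sketch of what the helper guarantees
(hypothetical demo code, not part of the patch):

	#include <stdint.h>
	#include <string.h>

	/* Store val little-endian at p, which may be unaligned. */
	static void put_unaligned_le32_demo(uint32_t val, void *p)
	{
		uint8_t b[4] = {
			val & 0xff, (val >> 8) & 0xff,
			(val >> 16) & 0xff, (val >> 24) & 0xff,
		};
		memcpy(p, b, sizeof(b));	/* byte copy needs no alignment */
	}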
drivers/firmware/arm_scmi/clock.c:

@@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates {
 struct scmi_clock_set_rate {
 	__le32 flags;
 #define CLOCK_SET_ASYNC		BIT(0)
-#define CLOCK_SET_DELAYED	BIT(1)
+#define CLOCK_SET_IGNORE_RESP	BIT(1)
 #define CLOCK_SET_ROUND_UP	BIT(2)
 #define CLOCK_SET_ROUND_AUTO	BIT(3)
 	__le32 id;
@@ -67,6 +67,7 @@ struct scmi_clock_set_rate {
 struct clock_info {
 	int num_clocks;
 	int max_async_req;
+	atomic_t cur_async_req;
 	struct scmi_clock_info *clk;
 };

@@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+	put_unaligned_le32(clk_id, t->tx.buf);
 	attr = t->rx.buf;

 	ret = scmi_do_xfer(handle, t);
@@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+	put_unaligned_le32(clk_id, t->tx.buf);

 	ret = scmi_do_xfer(handle, t);
-	if (!ret) {
-		__le32 *pval = t->rx.buf;
-
-		*value = le32_to_cpu(*pval);
-		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
-	}
+	if (!ret)
+		*value = get_unaligned_le64(t->rx.buf);

 	scmi_xfer_put(handle, t);
 	return ret;
 }

 static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
-			       u32 config, u64 rate)
+			       u64 rate)
 {
 	int ret;
+	u32 flags = 0;
 	struct scmi_xfer *t;
 	struct scmi_clock_set_rate *cfg;
+	struct clock_info *ci = handle->clk_priv;

 	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
 				 sizeof(*cfg), 0, &t);
 	if (ret)
 		return ret;

+	if (ci->max_async_req &&
+	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+		flags |= CLOCK_SET_ASYNC;
+
 	cfg = t->tx.buf;
-	cfg->flags = cpu_to_le32(config);
+	cfg->flags = cpu_to_le32(flags);
 	cfg->id = cpu_to_le32(clk_id);
 	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
 	cfg->value_high = cpu_to_le32(rate >> 32);

-	ret = scmi_do_xfer(handle, t);
+	if (flags & CLOCK_SET_ASYNC)
+		ret = scmi_do_xfer_with_response(handle, t);
+	else
+		ret = scmi_do_xfer(handle, t);
+
+	if (ci->max_async_req)
+		atomic_dec(&ci->cur_async_req);

 	scmi_xfer_put(handle, t);
 	return ret;
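
For context on the clock change above: callers no longer pass a flags word;
scmi_clock_rate_set() selects CLOCK_SET_ASYNC by itself whenever the platform
advertises spare asynchronous slots, and falls back to a synchronous transfer
otherwise. A hedged sketch of the consumer side, assuming a handle whose
clk_ops were installed by this protocol (the clk-scmi provider is the in-tree
user; the wrapper name is hypothetical):

	/* Inside a clock provider's .set_rate path (illustrative only). */
	static int scmi_clk_set_rate_sketch(const struct scmi_handle *handle,
					    u32 clk_id, u64 rate)
	{
		/* Sync vs. async is now decided inside rate_set() itself. */
		return handle->clk_ops->rate_set(handle, clk_id, rate);
	}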
drivers/firmware/arm_scmi/common.h:

@@ -15,6 +15,8 @@
 #include <linux/scmi_protocol.h>
 #include <linux/types.h>

+#include <asm/unaligned.h>
+
 #define PROTOCOL_REV_MINOR_MASK	GENMASK(15, 0)
 #define PROTOCOL_REV_MAJOR_MASK	GENMASK(31, 16)
 #define PROTOCOL_REV_MAJOR(x)	(u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version {
 /**
  * struct scmi_msg_hdr - Message(Tx/Rx) header
  *
- * @id: The identifier of the command being sent
- * @protocol_id: The identifier of the protocol used to send @id command
- * @seq: The token to identify the message. when a message/command returns,
- *       the platform returns the whole message header unmodified including
- *       the token
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @seq: The token to identify the message. When a message returns, the
+ *       platform returns the whole message header unmodified including the
+ *       token
  * @status: Status of the transfer once it's complete
  * @poll_completion: Indicate if the transfer needs to be polled for
  *	completion or interrupt mode is used
@@ -84,17 +86,21 @@ struct scmi_msg {
  * @rx: Receive message, the buffer should be pre-allocated to store
  *	message. If request-ACK protocol is used, we can reuse the same
  *	buffer for the rx path as we use for the tx path.
- * @done: completion event
+ * @done: command message transmit completion event
+ * @async: pointer to delayed response message received event completion
  */
 struct scmi_xfer {
 	struct scmi_msg_hdr hdr;
 	struct scmi_msg tx;
 	struct scmi_msg rx;
 	struct completion done;
+	struct completion *async_done;
 };

 void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
 int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer_with_response(const struct scmi_handle *h,
+			       struct scmi_xfer *xfer);
 int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
 		       size_t tx_size, size_t rx_size, struct scmi_xfer **p);
 int scmi_handle_put(const struct scmi_handle *handle);
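
The declarations above fix the transfer lifecycle that every protocol file in
this series follows: allocate and initialise a message, run the transfer
(optionally waiting for a delayed response via the new async_done pointer),
then release the slot. A schematic caller, where MSG_ID and PROT_ID stand in
for a real command and protocol identifier (a sketch with shortened error
handling, not code from the patch):

	static int example_cmd(const struct scmi_handle *h, u32 arg, u32 *out)
	{
		struct scmi_xfer *t;
		int ret;

		ret = scmi_xfer_get_init(h, MSG_ID, PROT_ID,
					 sizeof(u32), sizeof(u32), &t);
		if (ret)
			return ret;

		put_unaligned_le32(arg, t->tx.buf);	/* marshal the request */
		ret = scmi_do_xfer(h, t);	/* or scmi_do_xfer_with_response() */
		if (!ret)
			*out = get_unaligned_le32(t->rx.buf);

		scmi_xfer_put(h, t);		/* return the slot to the pool */
		return ret;
	}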
drivers/firmware/arm_scmi/driver.c:

@@ -30,8 +30,14 @@
 #include "common.h"

 #define MSG_ID_MASK		GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr)	FIELD_GET(MSG_ID_MASK, (hdr))
 #define MSG_TYPE_MASK		GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr)	FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND	0
+#define MSG_TYPE_DELAYED_RESP	2
+#define MSG_TYPE_NOTIFICATION	3
 #define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr)	FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
 #define MSG_TOKEN_ID_MASK	GENMASK(27, 18)
 #define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
 #define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
@@ -86,7 +92,7 @@ struct scmi_desc {
 };

 /**
- * struct scmi_chan_info - Structure representing a SCMI channel informfation
+ * struct scmi_chan_info - Structure representing a SCMI channel information
 *
  * @cl: Mailbox Client
  * @chan: Transmit/Receive mailbox channel
@@ -111,8 +117,9 @@ struct scmi_chan_info {
 * @handle: Instance of SCMI handle to send to clients
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
- * @minfo: Message info
- * @tx_idr: IDR object to map protocol id to channel info pointer
+ * @tx_minfo: Universal Transmit Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
@@ -123,8 +130,9 @@ struct scmi_info {
 	const struct scmi_desc *desc;
 	struct scmi_revision_info version;
 	struct scmi_handle handle;
-	struct scmi_xfers_info minfo;
+	struct scmi_xfers_info tx_minfo;
 	struct idr tx_idr;
+	struct idr rx_idr;
 	u8 *protocols_imp;
 	struct list_head node;
 	int users;
@@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno)
 static inline void scmi_dump_header_dbg(struct device *dev,
 					struct scmi_msg_hdr *hdr)
 {
-	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
+	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
 		hdr->id, hdr->seq, hdr->protocol_id);
 }
@@ -190,64 +198,20 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
 					struct scmi_shared_mem __iomem *mem)
 {
 	xfer->hdr.status = ioread32(mem->msg_payload);
-	/* Skip the length of header and statues in payload area i.e 8 bytes*/
+	/* Skip the length of header and status in payload area i.e 8 bytes */
 	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

 	/* Take a copy to the rx buffer.. */
 	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
 }

-/**
- * scmi_rx_callback() - mailbox client callback for receive messages
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
-{
-	u16 xfer_id;
-	struct scmi_xfer *xfer;
-	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
-	struct device *dev = cinfo->dev;
-	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
-	struct scmi_xfers_info *minfo = &info->minfo;
-	struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
-	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
-	/* Are we even expecting this? */
-	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
-		dev_err(dev, "message for %d is not expected!\n", xfer_id);
-		return;
-	}
-
-	xfer = &minfo->xfer_block[xfer_id];
-
-	scmi_dump_header_dbg(dev, &xfer->hdr);
-	/* Is the message of valid length? */
-	if (xfer->rx.len > info->desc->max_msg_size) {
-		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
-			xfer->rx.len, info->desc->max_msg_size);
-		return;
-	}
-
-	scmi_fetch_response(xfer, mem);
-	complete(&xfer->done);
-}
-
 /**
  * pack_scmi_header() - packs and returns 32-bit header
  *
  * @hdr: pointer to header containing all the information on message id,
  *	protocol id and sequence id.
  *
- * Return: 32-bit packed command header to be sent to the platform.
+ * Return: 32-bit packed message header to be sent to the platform.
  */
 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
 {
@@ -256,6 +220,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
 	       FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
 }

+/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+	hdr->id = MSG_XTRACT_ID(msg_hdr);
+	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
 /**
  * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
@@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
 	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
 	struct scmi_shared_mem __iomem *mem = cinfo->payload;

+	/*
+	 * Ideally channel must be free by now unless OS timeout last
+	 * request and platform continued to process the same, wait
+	 * until it releases the shared memory, otherwise we may endup
+	 * overwriting its response with new message payload or vice-versa
+	 */
+	spin_until_cond(ioread32(&mem->channel_status) &
+			SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 	/* Mark channel busy + clear error */
 	iowrite32(0x0, &mem->channel_status);
 	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
@@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
- * Helper function which is used by various command functions that are
+ * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
@@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
 *
 * Return: 0 if all went fine, else corresponding error.
 */
-static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+				       struct scmi_xfers_info *minfo)
 {
 	u16 xfer_id;
 	struct scmi_xfer *xfer;
 	unsigned long flags, bit_pos;
 	struct scmi_info *info = handle_to_scmi_info(handle);
-	struct scmi_xfers_info *minfo = &info->minfo;

 	/* Keep the locked section as small as possible */
 	spin_lock_irqsave(&minfo->xfer_lock, flags);
@@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
 }

 /**
- * scmi_xfer_put() - Release a message
+ * __scmi_xfer_put() - Release a message
 *
- * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 {
 	unsigned long flags;
-	struct scmi_info *info = handle_to_scmi_info(handle);
-	struct scmi_xfers_info *minfo = &info->minfo;

 	/*
 	 * Keep the locked section as small as possible
@@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 }

+/**
+ * scmi_rx_callback() - mailbox client callback for receive messages
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void scmi_rx_callback(struct mbox_client *cl, void *m)
+{
+	u8 msg_type;
+	u32 msg_hdr;
+	u16 xfer_id;
+	struct scmi_xfer *xfer;
+	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+	struct device *dev = cinfo->dev;
+	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+	struct scmi_xfers_info *minfo = &info->tx_minfo;
+	struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+	msg_hdr = ioread32(&mem->msg_header);
+	msg_type = MSG_XTRACT_TYPE(msg_hdr);
+	xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+	if (msg_type == MSG_TYPE_NOTIFICATION)
+		return; /* Notifications not yet supported */
+
+	/* Are we even expecting this? */
+	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+		dev_err(dev, "message for %d is not expected!\n", xfer_id);
+		return;
+	}
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	scmi_dump_header_dbg(dev, &xfer->hdr);
+
+	scmi_fetch_response(xfer, mem);
+
+	if (msg_type == MSG_TYPE_DELAYED_RESP)
+		complete(xfer->async_done);
+	else
+		complete(&xfer->done);
+}
+
+/**
+ * scmi_xfer_put() - Release a transmit message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	__scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
 static bool
 scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
 {
@@ -435,8 +481,36 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
 	return ret;
 }

+#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
+
 /**
- * scmi_xfer_get_init() - Allocate and initialise one message
+ * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ *	response is received
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
+ *	return corresponding error, else if all goes well, return 0.
+ */
+int scmi_do_xfer_with_response(const struct scmi_handle *handle,
+			       struct scmi_xfer *xfer)
+{
+	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+	DECLARE_COMPLETION_ONSTACK(async_response);
+
+	xfer->async_done = &async_response;
+
+	ret = scmi_do_xfer(handle, xfer);
+	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
+		ret = -ETIMEDOUT;
+
+	xfer->async_done = NULL;
+	return ret;
+}
+
+/**
+ * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
@@ -457,6 +531,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
 	int ret;
 	struct scmi_xfer *xfer;
 	struct scmi_info *info = handle_to_scmi_info(handle);
+	struct scmi_xfers_info *minfo = &info->tx_minfo;
 	struct device *dev = info->dev;

 	/* Ensure we have sane transfer sizes */
@@ -464,7 +539,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
 	    tx_size > info->desc->max_msg_size)
 		return -ERANGE;

-	xfer = scmi_xfer_get(handle);
+	xfer = scmi_xfer_get(handle, minfo);
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -597,27 +672,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
 	return 0;
 }

-static const struct scmi_desc scmi_generic_desc = {
-	.max_rx_timeout_ms = 30,	/* We may increase this if required */
-	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
-	.max_msg_size = 128,
-};
-
-/* Each compatible listed below must have descriptor associated with it */
-static const struct of_device_id scmi_of_match[] = {
-	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
-	{ /* Sentinel */ },
-};
-
-MODULE_DEVICE_TABLE(of, scmi_of_match);
-
 static int scmi_xfer_info_init(struct scmi_info *sinfo)
 {
 	int i;
 	struct scmi_xfer *xfer;
 	struct device *dev = sinfo->dev;
 	const struct scmi_desc *desc = sinfo->desc;
-	struct scmi_xfers_info *info = &sinfo->minfo;
+	struct scmi_xfers_info *info = &sinfo->tx_minfo;

 	/* Pre-allocated messages, no more than what hdr.seq can support */
 	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -652,9 +713,189 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
 	return 0;
 }

-static int scmi_mailbox_check(struct device_node *np)
+static int scmi_mailbox_check(struct device_node *np, int idx)
 {
-	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
+	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
+					  idx, NULL);
+}
+
+static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
+				int prot_id, bool tx)
+{
+	int ret, idx;
+	struct resource res;
+	resource_size_t size;
+	struct device_node *shmem, *np = dev->of_node;
+	struct scmi_chan_info *cinfo;
+	struct mbox_client *cl;
+	struct idr *idr;
+	const char *desc = tx ? "Tx" : "Rx";
+
+	/* Transmit channel is first entry i.e. index 0 */
+	idx = tx ? 0 : 1;
+	idr = tx ? &info->tx_idr : &info->rx_idr;
+
+	if (scmi_mailbox_check(np, idx)) {
+		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+			return -EINVAL;
+		goto idr_alloc;
+	}
+
+	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
+	if (!cinfo)
+		return -ENOMEM;
+
+	cinfo->dev = dev;
+
+	cl = &cinfo->cl;
+	cl->dev = dev;
+	cl->rx_callback = scmi_rx_callback;
+	cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
+	cl->tx_block = false;
+	cl->knows_txdone = tx;
+
+	shmem = of_parse_phandle(np, "shmem", idx);
+	ret = of_address_to_resource(shmem, 0, &res);
+	of_node_put(shmem);
+	if (ret) {
+		dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
+		return ret;
+	}
+
+	size = resource_size(&res);
+	cinfo->payload = devm_ioremap(info->dev, res.start, size);
+	if (!cinfo->payload) {
+		dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
+		return -EADDRNOTAVAIL;
+	}
+
+	cinfo->chan = mbox_request_channel(cl, idx);
+	if (IS_ERR(cinfo->chan)) {
+		ret = PTR_ERR(cinfo->chan);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to request SCMI %s mailbox\n",
+				desc);
+		return ret;
+	}
+
+idr_alloc:
+	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+	if (ret != prot_id) {
+		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
+		return ret;
+	}
+
+	cinfo->handle = &info->handle;
+	return 0;
+}
+
+static inline int
+scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+	int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+
+	if (!ret) /* Rx is optional, hence no error check */
+		scmi_mbox_chan_setup(info, dev, prot_id, false);
+
+	return ret;
+}
+
+static inline void
+scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
+			    int prot_id)
+{
+	struct scmi_device *sdev;
+
+	sdev = scmi_device_create(np, info->dev, prot_id);
+	if (!sdev) {
+		dev_err(info->dev, "failed to create %d protocol device\n",
+			prot_id);
+		return;
+	}
+
+	if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
+		dev_err(&sdev->dev, "failed to setup transport\n");
+		scmi_device_destroy(sdev);
+		return;
+	}
+
+	/* setup handle now as the transport is ready */
+	scmi_set_handle(sdev);
+}
+
+static int scmi_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct scmi_handle *handle;
+	const struct scmi_desc *desc;
+	struct scmi_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *child, *np = dev->of_node;
+
+	/* Only mailbox method supported, check for the presence of one */
+	if (scmi_mailbox_check(np, 0)) {
+		dev_err(dev, "no mailbox found in %pOF\n", np);
+		return -EINVAL;
+	}
+
+	desc = of_device_get_match_data(dev);
+	if (!desc)
+		return -EINVAL;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->desc = desc;
+	INIT_LIST_HEAD(&info->node);
+
+	ret = scmi_xfer_info_init(info);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, info);
+	idr_init(&info->tx_idr);
+	idr_init(&info->rx_idr);
+
+	handle = &info->handle;
+	handle->dev = info->dev;
+	handle->version = &info->version;
+
+	ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
+	if (ret)
+		return ret;
+
+	ret = scmi_base_protocol_init(handle);
+	if (ret) {
+		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&scmi_list_mutex);
+	list_add_tail(&info->node, &scmi_list);
+	mutex_unlock(&scmi_list_mutex);
+
+	for_each_available_child_of_node(np, child) {
+		u32 prot_id;
+
+		if (of_property_read_u32(child, "reg", &prot_id))
+			continue;
+
+		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
+			dev_err(dev, "Out of range protocol %d\n", prot_id);
+
+		if (!scmi_is_protocol_implemented(handle, prot_id)) {
+			dev_err(dev, "SCMI protocol %d not implemented\n",
+				prot_id);
+			continue;
+		}
+
+		scmi_create_protocol_device(child, info, prot_id);
+	}
+
+	return 0;
+}

 static int scmi_mbox_free_channel(int id, void *p, void *data)
@@ -692,167 +933,26 @@ static int scmi_remove(struct platform_device *pdev)
 	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
 	idr_destroy(&info->tx_idr);

+	idr = &info->rx_idr;
+	ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+	idr_destroy(&info->rx_idr);
+
 	return ret;
 }

-static inline int
-scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
-{
-	int ret;
-	struct resource res;
-	resource_size_t size;
-	struct device_node *shmem, *np = dev->of_node;
-	struct scmi_chan_info *cinfo;
-	struct mbox_client *cl;
+static const struct scmi_desc scmi_generic_desc = {
+	.max_rx_timeout_ms = 30,	/* We may increase this if required */
+	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
+	.max_msg_size = 128,
+};

-	if (scmi_mailbox_check(np)) {
-		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
-		goto idr_alloc;
-	}
+/* Each compatible listed below must have descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
+	{ /* Sentinel */ },
+};

-	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
-	if (!cinfo)
-		return -ENOMEM;
-
-	cinfo->dev = dev;
-
-	cl = &cinfo->cl;
-	cl->dev = dev;
-	cl->rx_callback = scmi_rx_callback;
-	cl->tx_prepare = scmi_tx_prepare;
-	cl->tx_block = false;
-	cl->knows_txdone = true;
-
-	shmem = of_parse_phandle(np, "shmem", 0);
-	ret = of_address_to_resource(shmem, 0, &res);
-	of_node_put(shmem);
-	if (ret) {
-		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
-		return ret;
-	}
-
-	size = resource_size(&res);
-	cinfo->payload = devm_ioremap(info->dev, res.start, size);
-	if (!cinfo->payload) {
-		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
-		return -EADDRNOTAVAIL;
-	}
-
-	/* Transmit channel is first entry i.e. index 0 */
-	cinfo->chan = mbox_request_channel(cl, 0);
-	if (IS_ERR(cinfo->chan)) {
-		ret = PTR_ERR(cinfo->chan);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to request SCMI Tx mailbox\n");
-		return ret;
-	}
-
-idr_alloc:
-	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
-	if (ret != prot_id) {
-		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
-		return ret;
-	}
-
-	cinfo->handle = &info->handle;
-	return 0;
-}
-
-static inline void
-scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
-			    int prot_id)
-{
-	struct scmi_device *sdev;
-
-	sdev = scmi_device_create(np, info->dev, prot_id);
-	if (!sdev) {
-		dev_err(info->dev, "failed to create %d protocol device\n",
-			prot_id);
-		return;
-	}
-
-	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
-		dev_err(&sdev->dev, "failed to setup transport\n");
-		scmi_device_destroy(sdev);
-		return;
-	}
-
-	/* setup handle now as the transport is ready */
-	scmi_set_handle(sdev);
-}
-
-static int scmi_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct scmi_handle *handle;
-	const struct scmi_desc *desc;
-	struct scmi_info *info;
-	struct device *dev = &pdev->dev;
-	struct device_node *child, *np = dev->of_node;
-
-	/* Only mailbox method supported, check for the presence of one */
-	if (scmi_mailbox_check(np)) {
-		dev_err(dev, "no mailbox found in %pOF\n", np);
-		return -EINVAL;
-	}
-
-	desc = of_device_get_match_data(dev);
-	if (!desc)
-		return -EINVAL;
-
-	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->dev = dev;
-	info->desc = desc;
-	INIT_LIST_HEAD(&info->node);
-
-	ret = scmi_xfer_info_init(info);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(pdev, info);
-	idr_init(&info->tx_idr);
-
-	handle = &info->handle;
-	handle->dev = info->dev;
-	handle->version = &info->version;
-
-	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
-	if (ret)
-		return ret;
-
-	ret = scmi_base_protocol_init(handle);
-	if (ret) {
-		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
-		return ret;
-	}
-
-	mutex_lock(&scmi_list_mutex);
-	list_add_tail(&info->node, &scmi_list);
-	mutex_unlock(&scmi_list_mutex);
-
-	for_each_available_child_of_node(np, child) {
-		u32 prot_id;
-
-		if (of_property_read_u32(child, "reg", &prot_id))
-			continue;
-
-		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
-			dev_err(dev, "Out of range protocol %d\n", prot_id);
-
-		if (!scmi_is_protocol_implemented(handle, prot_id)) {
-			dev_err(dev, "SCMI protocol %d not implemented\n",
-				prot_id);
-			continue;
-		}
-
-		scmi_create_protocol_device(child, info, prot_id);
-	}
-
-	return 0;
-}
+MODULE_DEVICE_TABLE(of, scmi_of_match);

 static struct platform_driver scmi_driver = {
 	.driver = {
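
The delayed-response plumbing added to driver.c follows a standard kernel
completion pattern: the sleeping caller parks on a stack completion while the
IRQ-context mailbox callback decides, from the message type, which completion
to signal. Condensed from the code above (not a new API, just the two halves
side by side):

	/* Caller side: scmi_do_xfer_with_response() */
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;
	ret = scmi_do_xfer(handle, xfer);	/* returns once the command is acked */
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;		/* no delayed response within 2 s */
	xfer->async_done = NULL;

	/* IRQ side: scmi_rx_callback() routes on MSG_XTRACT_TYPE() */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		complete(xfer->async_done);	/* wake the asynchronous waiter */
	else
		complete(&xfer->done);		/* ordinary command response */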
drivers/firmware/arm_scmi/perf.c:

@@ -5,7 +5,10 @@
 * Copyright (C) 2018 ARM Ltd.
 */

+#include <linux/bits.h>
 #include <linux/of.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 #include <linux/sort.h>
@@ -21,6 +24,7 @@ enum scmi_performance_protocol_cmd {
 	PERF_LEVEL_GET = 0x8,
 	PERF_NOTIFY_LIMITS = 0x9,
 	PERF_NOTIFY_LEVEL = 0xa,
+	PERF_DESCRIBE_FASTCHANNEL = 0xb,
 };

 struct scmi_opp {
@@ -44,6 +48,7 @@ struct scmi_msg_resp_perf_domain_attributes {
 #define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
 #define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
 #define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
 	__le32 rate_limit_us;
 	__le32 sustained_freq_khz;
 	__le32 sustained_perf_level;
@@ -87,17 +92,56 @@ struct scmi_msg_resp_perf_describe_levels {
 	} opp[0];
 };

+struct scmi_perf_get_fc_info {
+	__le32 domain;
+	__le32 message_id;
+};
+
+struct scmi_msg_resp_perf_desc_fc {
+	__le32 attr;
+#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
+	__le32 rate_limit;
+	__le32 chan_addr_low;
+	__le32 chan_addr_high;
+	__le32 chan_size;
+	__le32 db_addr_low;
+	__le32 db_addr_high;
+	__le32 db_set_lmask;
+	__le32 db_set_hmask;
+	__le32 db_preserve_lmask;
+	__le32 db_preserve_hmask;
+};
+
+struct scmi_fc_db_info {
+	int width;
+	u64 set;
+	u64 mask;
+	void __iomem *addr;
+};
+
+struct scmi_fc_info {
+	void __iomem *level_set_addr;
+	void __iomem *limit_set_addr;
+	void __iomem *level_get_addr;
+	void __iomem *limit_get_addr;
+	struct scmi_fc_db_info *level_set_db;
+	struct scmi_fc_db_info *limit_set_db;
+};
+
 struct perf_dom_info {
 	bool set_limits;
 	bool set_perf;
 	bool perf_limit_notify;
 	bool perf_level_notify;
+	bool perf_fastchannels;
 	u32 opp_count;
 	u32 sustained_freq_khz;
 	u32 sustained_perf_level;
 	u32 mult_factor;
 	char name[SCMI_MAX_STR_SIZE];
 	struct scmi_opp opp[MAX_OPPS];
+	struct scmi_fc_info *fc_info;
 };

 struct scmi_perf_info {
@@ -151,7 +195,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 	attr = t->rx.buf;

 	ret = scmi_do_xfer(handle, t);
@@ -162,6 +206,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
 		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
 		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
 		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
 		dom_info->sustained_freq_khz =
 					le32_to_cpu(attr->sustained_freq_khz);
 		dom_info->sustained_perf_level =
@@ -249,8 +294,42 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
 	return ret;
 }

-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
-				u32 max_perf, u32 min_perf)
+#define SCMI_PERF_FC_RING_DB(w)				\
+do {							\
+	u##w val = 0;					\
+							\
+	if (db->mask)					\
+		val = ioread##w(db->addr) & db->mask;	\
+	iowrite##w((u##w)db->set | val, db->addr);	\
+} while (0)
+
+static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
+{
+	if (!db || !db->addr)
+		return;
+
+	if (db->width == 1)
+		SCMI_PERF_FC_RING_DB(8);
+	else if (db->width == 2)
+		SCMI_PERF_FC_RING_DB(16);
+	else if (db->width == 4)
+		SCMI_PERF_FC_RING_DB(32);
+	else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+		SCMI_PERF_FC_RING_DB(64);
+#else
+	{
+		u64 val = 0;
+
+		if (db->mask)
+			val = ioread64_hi_lo(db->addr) & db->mask;
+		iowrite64_hi_lo(db->set, db->addr);
+	}
+#endif
+}
+
+static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
+				   u32 max_perf, u32 min_perf)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -272,8 +351,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
 	return ret;
 }

-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
-				u32 *max_perf, u32 *min_perf)
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+				u32 max_perf, u32 min_perf)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->limit_set_addr) {
+		iowrite32(max_perf, dom->fc_info->limit_set_addr);
+		iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
+		scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+		return 0;
+	}
+
+	return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
+				   u32 *max_perf, u32 *min_perf)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -284,7 +379,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);

 	ret = scmi_do_xfer(handle, t);
 	if (!ret) {
@@ -298,8 +393,23 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
 	return ret;
 }

-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
-			       u32 level, bool poll)
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+				u32 *max_perf, u32 *min_perf)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->limit_get_addr) {
+		*max_perf = ioread32(dom->fc_info->limit_get_addr);
+		*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+		return 0;
+	}
+
+	return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
+				  u32 level, bool poll)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -321,8 +431,23 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
 	return ret;
 }

-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
-			       u32 *level, bool poll)
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+			       u32 level, bool poll)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->level_set_addr) {
+		iowrite32(level, dom->fc_info->level_set_addr);
+		scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+		return 0;
+	}
+
+	return scmi_perf_mb_level_set(handle, domain, level, poll);
+}
+
+static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
+				  u32 *level, bool poll)
 {
 	int ret;
 	struct scmi_xfer *t;
@@ -333,16 +458,128 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
 		return ret;

 	t->hdr.poll_completion = poll;
-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);

 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
-		*level = le32_to_cpu(*(__le32 *)t->rx.buf);
+		*level = get_unaligned_le32(t->rx.buf);

 	scmi_xfer_put(handle, t);
 	return ret;
 }

+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+			       u32 *level, bool poll)
+{
+	struct scmi_perf_info *pi = handle->perf_priv;
+	struct perf_dom_info *dom = pi->dom_info + domain;
+
+	if (dom->fc_info && dom->fc_info->level_get_addr) {
+		*level = ioread32(dom->fc_info->level_get_addr);
+		return 0;
+	}
+
+	return scmi_perf_mb_level_get(handle, domain, level, poll);
+}
+
+static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
+{
+	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
+		return true;
+	if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
+		return true;
+	return false;
+}
+
+static void
+scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+			 u32 message_id, void __iomem **p_addr,
+			 struct scmi_fc_db_info **p_db)
+{
+	int ret;
+	u32 flags;
+	u64 phys_addr;
+	u8 size;
+	void __iomem *addr;
+	struct scmi_xfer *t;
+	struct scmi_fc_db_info *db;
+	struct scmi_perf_get_fc_info *info;
+	struct scmi_msg_resp_perf_desc_fc *resp;
+
+	if (!p_addr)
+		return;
+
+	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
+				 SCMI_PROTOCOL_PERF,
+				 sizeof(*info), sizeof(*resp), &t);
+	if (ret)
+		return;
+
+	info = t->tx.buf;
+	info->domain = cpu_to_le32(domain);
+	info->message_id = cpu_to_le32(message_id);
+
+	ret = scmi_do_xfer(handle, t);
+	if (ret)
+		goto err_xfer;
+
+	resp = t->rx.buf;
+	flags = le32_to_cpu(resp->attr);
+	size = le32_to_cpu(resp->chan_size);
+	if (!scmi_perf_fc_size_is_valid(message_id, size))
+		goto err_xfer;
+
+	phys_addr = le32_to_cpu(resp->chan_addr_low);
+	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+	addr = devm_ioremap(handle->dev, phys_addr, size);
+	if (!addr)
+		goto err_xfer;
+	*p_addr = addr;
+
+	if (p_db && SUPPORTS_DOORBELL(flags)) {
+		db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+		if (!db)
+			goto err_xfer;
+
+		size = 1 << DOORBELL_REG_WIDTH(flags);
+		phys_addr = le32_to_cpu(resp->db_addr_low);
+		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+		addr = devm_ioremap(handle->dev, phys_addr, size);
+		if (!addr)
+			goto err_xfer;
+
+		db->addr = addr;
+		db->width = size;
+		db->set = le32_to_cpu(resp->db_set_lmask);
+		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+		db->mask = le32_to_cpu(resp->db_preserve_lmask);
+		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+		*p_db = db;
+	}
+err_xfer:
+	scmi_xfer_put(handle, t);
+}
+
+static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+				     u32 domain, struct scmi_fc_info **p_fc)
+{
+	struct scmi_fc_info *fc;
+
+	fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+	if (!fc)
+		return;
+
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+				 &fc->level_set_addr, &fc->level_set_db);
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+				 &fc->level_get_addr, NULL);
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+				 &fc->limit_set_addr, &fc->limit_set_db);
+	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+				 &fc->limit_get_addr, NULL);
+	*p_fc = fc;
+}
+
 /* Device specific ops */
 static int scmi_dev_domain_id(struct device *dev)
 {
@@ -494,6 +731,9 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)

 		scmi_perf_domain_attributes_get(handle, domain, dom);
 		scmi_perf_describe_levels_get(handle, domain, dom);
+
+		if (dom->perf_fastchannels)
+			scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
 	}

 	handle->perf_ops = &perf_ops;
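
The fast-channel doorbell helper above performs a read-modify-write so that
register bits covered by the preserve mask survive the ring. A worked example
with an 8-bit doorbell (the values are illustrative):

	/* db->set = 0x01 (ring bit), db->mask = 0xf0 (bits to preserve) */
	u8 old = ioread8(db->addr);		/* say the register reads 0xa5 */
	u8 val = old & 0xf0;			/* keep the preserved bits: 0xa0 */
	iowrite8(0x01 | val, db->addr);		/* ring with 0xa1 */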
drivers/firmware/arm_scmi/power.c:

@@ -96,7 +96,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);
 	attr = t->rx.buf;

 	ret = scmi_do_xfer(handle, t);
@@ -147,11 +147,11 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
 	if (ret)
 		return ret;

-	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
+	put_unaligned_le32(domain, t->tx.buf);

 	ret = scmi_do_xfer(handle, t);
 	if (!ret)
-		*state = le32_to_cpu(*(__le32 *)t->rx.buf);
+		*state = get_unaligned_le32(t->rx.buf);

 	scmi_xfer_put(handle, t);
 	return ret;
drivers/firmware/arm_scmi/reset.c (new file, 231 lines):
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_reset_protocol_cmd {
+	RESET_DOMAIN_ATTRIBUTES = 0x3,
+	RESET = 0x4,
+	RESET_NOTIFY = 0x5,
+};
+
+enum scmi_reset_protocol_notify {
+	RESET_ISSUED = 0x0,
+};
+
+#define NUM_RESET_DOMAIN_MASK	0xffff
+#define RESET_NOTIFY_ENABLE	BIT(0)
+
+struct scmi_msg_resp_reset_domain_attributes {
+	__le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x)		((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x)	((x) & BIT(30))
+	__le32 latency;
+	u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+	__le32 domain_id;
+	__le32 flags;
+#define AUTONOMOUS_RESET	BIT(0)
+#define EXPLICIT_RESET_ASSERT	BIT(1)
+#define ASYNCHRONOUS_RESET	BIT(2)
+	__le32 reset_state;
+#define ARCH_RESET_TYPE		BIT(31)
+#define COLD_RESET_STATE	BIT(0)
+#define ARCH_COLD_RESET		(ARCH_RESET_TYPE | COLD_RESET_STATE)
+};
+
+struct reset_dom_info {
+	bool async_reset;
+	bool reset_notify;
+	u32 latency_us;
+	char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+	int num_domains;
+	struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+				     struct scmi_reset_info *pi)
+{
+	int ret;
+	struct scmi_xfer *t;
+	u32 attr;
+
+	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+				 SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+	if (ret)
+		return ret;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		attr = get_unaligned_le32(t->rx.buf);
+		pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+				 struct reset_dom_info *dom_info)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_resp_reset_domain_attributes *attr;
+
+	ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
+				 SCMI_PROTOCOL_RESET, sizeof(domain),
+				 sizeof(*attr), &t);
+	if (ret)
+		return ret;
+
+	put_unaligned_le32(domain, t->tx.buf);
+	attr = t->rx.buf;
+
+	ret = scmi_do_xfer(handle, t);
+	if (!ret) {
+		u32 attributes = le32_to_cpu(attr->attributes);
+
+		dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+		dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+		dom_info->latency_us = le32_to_cpu(attr->latency);
+		if (dom_info->latency_us == U32_MAX)
+			dom_info->latency_us = 0;
+		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+	}
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+{
+	struct scmi_reset_info *pi = handle->reset_priv;
+
+	return pi->num_domains;
+}
+
+static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+{
+	struct scmi_reset_info *pi = handle->reset_priv;
+	struct reset_dom_info *dom = pi->dom_info + domain;
+
+	return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+{
+	struct scmi_reset_info *pi = handle->reset_priv;
+	struct reset_dom_info *dom = pi->dom_info + domain;
+
+	return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+			     u32 flags, u32 state)
+{
+	int ret;
+	struct scmi_xfer *t;
+	struct scmi_msg_reset_domain_reset *dom;
+	struct scmi_reset_info *pi = handle->reset_priv;
+	struct reset_dom_info *rdom = pi->dom_info + domain;
+
+	if (rdom->async_reset)
+		flags |= ASYNCHRONOUS_RESET;
+
+	ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
+				 sizeof(*dom), 0, &t);
+	if (ret)
+		return ret;
+
+	dom = t->tx.buf;
+	dom->domain_id = cpu_to_le32(domain);
+	dom->flags = cpu_to_le32(flags);
+	dom->domain_id = cpu_to_le32(state);
+
+	if (rdom->async_reset)
+		ret = scmi_do_xfer_with_response(handle, t);
+	else
+		ret = scmi_do_xfer(handle, t);
+
+	scmi_xfer_put(handle, t);
+	return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+{
+	return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+				 ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+{
+	return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+				 ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+{
+	return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+}
+
+static struct scmi_reset_ops reset_ops = {
+	.num_domains_get = scmi_reset_num_domains_get,
+	.name_get = scmi_reset_name_get,
+	.latency_get = scmi_reset_latency_get,
+	.reset = scmi_reset_domain_reset,
+	.assert = scmi_reset_domain_assert,
+	.deassert = scmi_reset_domain_deassert,
+};
+
+static int scmi_reset_protocol_init(struct scmi_handle *handle)
+{
+	int domain;
+	u32 version;
+	struct scmi_reset_info *pinfo;
+
+	scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+
+	dev_dbg(handle->dev, "Reset Version %d.%d\n",
+		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return -ENOMEM;
+
+	scmi_reset_attributes_get(handle, pinfo);
+
+	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+				       sizeof(*pinfo->dom_info), GFP_KERNEL);
+	if (!pinfo->dom_info)
+		return -ENOMEM;
+
+	for (domain = 0; domain < pinfo->num_domains; domain++) {
+		struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+		scmi_reset_domain_attributes_get(handle, domain, dom);
+	}
+
+	handle->reset_ops = &reset_ops;
+	handle->reset_priv = pinfo;
+
+	return 0;
+}
+
+static int __init scmi_reset_init(void)
+{
+	return scmi_protocol_register(SCMI_PROTOCOL_RESET,
+				      &scmi_reset_protocol_init);
+}
+subsys_initcall(scmi_reset_init);
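
A consumer would reach the new reset protocol through the handle populated by
scmi_reset_protocol_init(); a hedged sketch of such a caller (for instance a
reset controller driver layered on SCMI; the wrapper name is hypothetical):

	static int example_reset_assert(const struct scmi_handle *handle,
					u32 domain)
	{
		/* Takes the EXPLICIT_RESET_ASSERT path of scmi_domain_reset() */
		return handle->reset_ops->assert(handle, domain);
	}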
@@ -9,8 +9,8 @@
 
 enum scmi_sensor_protocol_cmd {
 	SENSOR_DESCRIPTION_GET = 0x3,
-	SENSOR_CONFIG_SET = 0x4,
-	SENSOR_TRIP_POINT_SET = 0x5,
+	SENSOR_TRIP_POINT_NOTIFY = 0x4,
+	SENSOR_TRIP_POINT_CONFIG = 0x5,
 	SENSOR_READING_GET = 0x6,
 };
 
@@ -42,9 +42,10 @@ struct scmi_msg_resp_sensor_description {
 	} desc[0];
 };
 
-struct scmi_msg_set_sensor_config {
+struct scmi_msg_sensor_trip_point_notify {
 	__le32 id;
 	__le32 event_control;
+#define SENSOR_TP_NOTIFY_ALL	BIT(0)
 };
 
 struct scmi_msg_set_sensor_trip_point {
@@ -119,7 +120,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
 
 	do {
 		/* Set the number of sensors to be skipped/already read */
-		*(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
+		put_unaligned_le32(desc_index, t->tx.buf);
 
 		ret = scmi_do_xfer(handle, t);
 		if (ret)
@@ -135,9 +136,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
 		}
 
 		for (cnt = 0; cnt < num_returned; cnt++) {
-			u32 attrh;
+			u32 attrh, attrl;
 			struct scmi_sensor_info *s;
 
+			attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
 			attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
 			s = &si->sensors[desc_index + cnt];
 			s->id = le32_to_cpu(buf->desc[cnt].id);
@@ -146,6 +148,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
 			/* Sign extend to a full s8 */
 			if (s->scale & SENSOR_SCALE_SIGN)
 				s->scale |= SENSOR_SCALE_EXTEND;
+			s->async = SUPPORTS_ASYNC_READ(attrl);
+			s->num_trip_points = NUM_TRIP_POINTS(attrl);
 			strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
 		}
 
@@ -160,15 +164,15 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
 	return ret;
 }
 
-static int
-scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+					 u32 sensor_id, bool enable)
 {
 	int ret;
-	u32 evt_cntl = BIT(0);
+	u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
 	struct scmi_xfer *t;
-	struct scmi_msg_set_sensor_config *cfg;
+	struct scmi_msg_sensor_trip_point_notify *cfg;
 
-	ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
 				 SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
 	if (ret)
 		return ret;
@@ -183,15 +187,16 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
 	return ret;
 }
 
-static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
-	u32 sensor_id, u8 trip_id, u64 trip_value)
+static int
+scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
+			      u8 trip_id, u64 trip_value)
 {
 	int ret;
 	u32 evt_cntl = SENSOR_TP_BOTH;
 	struct scmi_xfer *t;
 	struct scmi_msg_set_sensor_trip_point *trip;
 
-	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
+	ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
 				 SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
 	if (ret)
 		return ret;
@@ -209,11 +214,13 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
 }
 
 static int scmi_sensor_reading_get(const struct scmi_handle *handle,
-				   u32 sensor_id, bool async, u64 *value)
+				   u32 sensor_id, u64 *value)
 {
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_sensor_reading_get *sensor;
+	struct sensors_info *si = handle->sensor_priv;
+	struct scmi_sensor_info *s = si->sensors + sensor_id;
 
 	ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
 				 SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
@@ -223,14 +230,18 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
 
 	sensor = t->tx.buf;
 	sensor->id = cpu_to_le32(sensor_id);
-	sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
 
-	ret = scmi_do_xfer(handle, t);
-	if (!ret) {
-		__le32 *pval = t->rx.buf;
-
-		*value = le32_to_cpu(*pval);
-		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+	if (s->async) {
+		sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+		ret = scmi_do_xfer_with_response(handle, t);
+		if (!ret)
+			*value = get_unaligned_le64((void *)
+						    ((__le32 *)t->rx.buf + 1));
+	} else {
+		sensor->flags = cpu_to_le32(0);
+		ret = scmi_do_xfer(handle, t);
+		if (!ret)
+			*value = get_unaligned_le64(t->rx.buf);
 	}
 
 	scmi_xfer_put(handle, t);
@@ -255,8 +266,8 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
 static struct scmi_sensor_ops sensor_ops = {
	.count_get = scmi_sensor_count_get,
 	.info_get = scmi_sensor_info_get,
-	.configuration_set = scmi_sensor_configuration_set,
-	.trip_point_set = scmi_sensor_trip_point_set,
+	.trip_point_notify = scmi_sensor_trip_point_notify,
+	.trip_point_config = scmi_sensor_trip_point_config,
 	.reading_get = scmi_sensor_reading_get,
 };
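The rename is visible to consumers such as the SCMI hwmon driver: configuration_set/trip_point_set are gone and the two ops above take their place. A sketch of how a client would now arm a trip point (the wrapper name and values are illustrative, not from this commit):

	/* Sketch: enable trip-point notifications and program trip 0. */
	static int arm_trip(const struct scmi_handle *h, u32 sensor_id, u64 value)
	{
		int ret;

		ret = h->sensor_ops->trip_point_notify(h, sensor_id, true);
		if (ret)
			return ret;

		return h->sensor_ops->trip_point_config(h, sensor_id, 0, value);
	}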
@@ -1,4 +1,15 @@
 # SPDX-License-Identifier: GPL-2.0-only
+config IMX_DSP
+	bool "IMX DSP Protocol driver"
+	depends on IMX_MBOX
+	help
+	  This enables the DSP IPC protocol between the host AP (Linux)
+	  and the firmware running on the DSP.
+	  A DSP exists on some i.MX8 processors (e.g. i.MX8QM, i.MX8QXP).
+
+	  It acts like a doorbell. Clients may use shared memory to
+	  exchange information with the DSP side.
+
 config IMX_SCU
 	bool "IMX SCU Protocol driver"
 	depends on IMX_MBOX
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IMX_DSP)	+= imx-dsp.o
 obj-$(CONFIG_IMX_SCU)	+= imx-scu.o misc.o imx-scu-irq.o
 obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
drivers/firmware/imx/imx-dsp.c (new file, 155 lines)
@@ -0,0 +1,155 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2019 NXP
 * Author: Daniel Baluta <daniel.baluta@nxp.com>
 *
 * Implementation of the DSP IPC interface (host side)
 */

#include <linux/firmware/imx/dsp.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/*
 * imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP)
 *
 * @ipc: DSP IPC handle
 * @idx: index of the channel where to trigger the interrupt
 *
 * Returns non-negative value for success, negative value for error
 */
int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx)
{
	int ret;
	struct imx_dsp_chan *dsp_chan;

	if (idx >= DSP_MU_CHAN_NUM)
		return -EINVAL;

	dsp_chan = &ipc->chans[idx];
	ret = mbox_send_message(dsp_chan->ch, NULL);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL(imx_dsp_ring_doorbell);

/*
 * imx_dsp_handle_rx - rx callback used by imx mailbox
 *
 * @c: mbox client
 * @msg: message received
 *
 * Users of DSP IPC will need to provide handle_reply and handle_request
 * callbacks.
 */
static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
{
	struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);

	if (chan->idx == 0) {
		chan->ipc->ops->handle_reply(chan->ipc);
	} else {
		chan->ipc->ops->handle_request(chan->ipc);
		imx_dsp_ring_doorbell(chan->ipc, 1);
	}
}

static int imx_dsp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_dsp_ipc *dsp_ipc;
	struct imx_dsp_chan *dsp_chan;
	struct mbox_client *cl;
	char *chan_name;
	int ret;
	int i, j;

	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);

	dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
	if (!dsp_ipc)
		return -ENOMEM;

	for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
		if (i < 2)
			chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
		else
			chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2);

		if (!chan_name)
			return -ENOMEM;

		dsp_chan = &dsp_ipc->chans[i];
		cl = &dsp_chan->cl;
		cl->dev = dev;
		cl->tx_block = false;
		cl->knows_txdone = true;
		cl->rx_callback = imx_dsp_handle_rx;

		dsp_chan->ipc = dsp_ipc;
		dsp_chan->idx = i % 2;
		dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
		if (IS_ERR(dsp_chan->ch)) {
			ret = PTR_ERR(dsp_chan->ch);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
					chan_name, ret);
			goto out;
		}

		dev_dbg(dev, "request mbox chan %s\n", chan_name);
		/* chan_name is not used anymore by framework */
		kfree(chan_name);
	}

	dsp_ipc->dev = dev;

	dev_set_drvdata(dev, dsp_ipc);

	dev_info(dev, "NXP i.MX DSP IPC initialized\n");

	return devm_of_platform_populate(dev);
out:
	kfree(chan_name);
	for (j = 0; j < i; j++) {
		dsp_chan = &dsp_ipc->chans[j];
		mbox_free_channel(dsp_chan->ch);
	}

	return ret;
}

static int imx_dsp_remove(struct platform_device *pdev)
{
	struct imx_dsp_chan *dsp_chan;
	struct imx_dsp_ipc *dsp_ipc;
	int i;

	dsp_ipc = dev_get_drvdata(&pdev->dev);

	for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
		dsp_chan = &dsp_ipc->chans[i];
		mbox_free_channel(dsp_chan->ch);
	}

	return 0;
}

static struct platform_driver imx_dsp_driver = {
	.driver = {
		.name = "imx-dsp",
	},
	.probe = imx_dsp_probe,
	.remove = imx_dsp_remove,
};
builtin_platform_driver(imx_dsp_driver);

MODULE_AUTHOR("Daniel Baluta <daniel.baluta@nxp.com>");
MODULE_DESCRIPTION("IMX DSP IPC protocol driver");
MODULE_LICENSE("GPL v2");
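To show how the pieces fit together: a client driver (SOF's i.MX8 platform code is the intended user) supplies handle_reply/handle_request through imx_dsp_ipc->ops, places its message in whatever shared memory it negotiated separately, and rings doorbell 0. A hypothetical minimal client, assuming the struct imx_dsp_ops callback layout used by imx_dsp_handle_rx() above:

	/* Hypothetical client of the imx_dsp_ipc API above. */
	static void my_handle_reply(struct imx_dsp_ipc *ipc)
	{
		/* parse the DSP's reply from shared memory */
	}

	static void my_handle_request(struct imx_dsp_ipc *ipc)
	{
		/* handle a DSP-initiated request; the core rings txdb1 after this returns */
	}

	static struct imx_dsp_ops my_ops = {
		.handle_reply = my_handle_reply,
		.handle_request = my_handle_request,
	};

	static int my_send(struct imx_dsp_ipc *ipc)
	{
		ipc->ops = &my_ops;
		/* ... place the request in shared memory ... */
		return imx_dsp_ring_doorbell(ipc, 0);	/* kick txdb0 */
	}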
@@ -92,7 +92,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
 	{ "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
 	{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
 	{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
-	{ "mu", IMX_SC_R_MU_0A, 14, true, 0 },
+	{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
+	{ "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
 
 	/* CONN SS */
 	{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
@@ -130,6 +131,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
 	{ "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
 	{ "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
 	{ "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+	{ "irqstr_dsp", IMX_SC_R_IRQSTR_DSP, 1, false, 0 },
 
 	/* VPU SS */
 	{ "vpu", IMX_SC_R_VPU, 1, false, 0 },
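For readers scanning the table: each row is an imx_sc_pd_range, which scu-pd.c expands into one power domain per resource; when the postfix flag is set, an index starting at start_from is appended to the name, so the rows added above should yield domains "mu_a0" through "mu_a13" and a single "mu_b13". The row layout, per the driver (reproduced here as an aid, not part of the diff):

	/* Row layout of imx8qxp_scu_pd_ranges[] (from scu-pd.c). */
	struct imx_sc_pd_range {
		char *name;	/* base name of the domain(s) */
		u32 rsrc;	/* first SC resource of the range */
		u8 num;		/* number of consecutive resources */
		bool postfix;	/* append an index to the name? */
		u8 start_from;	/* first index used for the postfix */
	};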
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -425,21 +426,23 @@ EXPORT_SYMBOL(qcom_scm_set_remote_state);
  * @mem_sz:	size of the region.
  * @srcvm:	vmid for current set of owners, each set bit in
  *		flag indicate a unique owner
- * @newvm:	array having new owners and corrsponding permission
+ * @newvm:	array having new owners and corresponding permission
  *		flags
  * @dest_cnt:	number of owners in next set.
  *
- * Return negative errno on failure, 0 on success, with @srcvm updated.
+ * Return negative errno on failure or 0 on success with @srcvm updated.
  */
 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 			unsigned int *srcvm,
-			struct qcom_scm_vmperm *newvm, int dest_cnt)
+			const struct qcom_scm_vmperm *newvm,
+			unsigned int dest_cnt)
 {
 	struct qcom_scm_current_perm_info *destvm;
 	struct qcom_scm_mem_map_info *mem_to_map;
 	phys_addr_t mem_to_map_phys;
 	phys_addr_t dest_phys;
 	phys_addr_t ptr_phys;
+	dma_addr_t ptr_dma;
 	size_t mem_to_map_sz;
 	size_t dest_sz;
 	size_t src_sz;
@@ -447,52 +450,50 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 	int next_vm;
 	__le32 *src;
 	void *ptr;
-	int ret;
-	int len;
-	int i;
+	int ret, i, b;
+	unsigned long srcvm_bits = *srcvm;
 
-	src_sz = hweight_long(*srcvm) * sizeof(*src);
+	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
 	mem_to_map_sz = sizeof(*mem_to_map);
 	dest_sz = dest_cnt * sizeof(*destvm);
 	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);
 
-	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
+	ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
 
 	/* Fill source vmid detail */
 	src = ptr;
-	len = hweight_long(*srcvm);
-	for (i = 0; i < len; i++) {
-		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
-		*srcvm ^= 1 << (ffs(*srcvm) - 1);
-	}
+	i = 0;
+	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
+		src[i++] = cpu_to_le32(b);
 
 	/* Fill details of mem buff to map */
 	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
 	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
-	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
-	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
+	mem_to_map->mem_size = cpu_to_le64(mem_sz);
 
 	next_vm = 0;
 	/* Fill details of next vmid detail */
 	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
-	for (i = 0; i < dest_cnt; i++) {
-		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
-		destvm[i].perm = cpu_to_le32(newvm[i].perm);
-		destvm[i].ctx = 0;
-		destvm[i].ctx_size = 0;
-		next_vm |= BIT(newvm[i].vmid);
+	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
+		destvm->vmid = cpu_to_le32(newvm->vmid);
+		destvm->perm = cpu_to_le32(newvm->perm);
+		destvm->ctx = 0;
+		destvm->ctx_size = 0;
+		next_vm |= BIT(newvm->vmid);
 	}
 
 	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
 				    ptr_phys, src_sz, dest_phys, dest_sz);
-	dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
 	if (ret) {
 		dev_err(__scm->dev,
-			"Assign memory protection call failed %d.\n", ret);
+			"Assign memory protection call failed %d\n", ret);
 		return -EINVAL;
 	}
 
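For context on the signature change: callers such as the Qualcomm remoteproc path pass the current owner set by value-result and a table of new owners. A sketch with illustrative VMIDs (the helper name is hypothetical; the VMID and permission constants are from include/linux/qcom_scm.h):

	/* Sketch: hand a buffer from HLOS to the modem VM (illustrative). */
	static int give_mem_to_mss(phys_addr_t addr, size_t size)
	{
		unsigned int owners = BIT(QCOM_SCM_VMID_HLOS);
		struct qcom_scm_vmperm newvm = {
			.vmid = QCOM_SCM_VMID_MSS_MSA,
			.perm = QCOM_SCM_PERM_RW,
		};

		/* On success, owners is updated to the new owner bitmap. */
		return qcom_scm_assign_mem(addr, size, &owners, &newvm, 1);
	}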
@@ -635,6 +635,7 @@ fail:
 
 /**
  * ti_sci_cmd_get_device() - command to request for device managed by TISCI
+ *			     that can be shared with other hosts.
  * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
  * @id:		Device Identifier
  *
@@ -642,11 +643,29 @@ fail:
  * usage count by balancing get_device with put_device. No refcounting is
  * managed by driver for that purpose.
  *
- * NOTE: The request is for exclusive access for the processor.
- *
  * Return: 0 if all went fine, else return appropriate error.
  */
 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id, 0,
+				       MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
+ *				       TISCI that is exclusively owned by the
+ *				       requesting host.
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
+					   u32 id)
 {
 	return ti_sci_set_device_state(handle, id,
 				       MSG_FLAG_DEVICE_EXCLUSIVE,
@@ -665,6 +684,26 @@ static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
  * Return: 0 if all went fine, else return appropriate error.
  */
 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id, 0,
+				       MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
+ *					TISCI that is exclusively owned by
+ *					requesting host.
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
+					    u32 id)
 {
 	return ti_sci_set_device_state(handle, id,
 				       MSG_FLAG_DEVICE_EXCLUSIVE,
@@ -2894,7 +2933,9 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
 	core_ops->reboot_device = ti_sci_cmd_core_reboot;
 
 	dops->get_device = ti_sci_cmd_get_device;
+	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
 	dops->idle_device = ti_sci_cmd_idle_device;
+	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
 	dops->put_device = ti_sci_cmd_put_device;
 
 	dops->is_valid = ti_sci_cmd_dev_is_valid;
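The practical upshot for TI-SCI clients: get_device()/idle_device() now request shared access, and the new *_exclusive ops pass MSG_FLAG_DEVICE_EXCLUSIVE. A consumer holding the handle could choose at runtime (a sketch; the helper name is hypothetical):

	/* Sketch: request a TISCI-managed device, shared or exclusive. */
	static int request_dev(const struct ti_sci_handle *handle, u32 id,
			       bool exclusive)
	{
		const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;

		return exclusive ? dops->get_device_exclusive(handle, id)
				 : dops->get_device(handle, id);
	}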
drivers/firmware/turris-mox-rwtm.c (new file, 384 lines)
@@ -0,0 +1,384 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Turris Mox rWTM firmware driver
 *
 * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
 */

#include <linux/armada-37xx-rwtm-mailbox.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/hw_random.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define DRIVER_NAME		"turris-mox-rwtm"

/*
 * The macros and constants below come from Turris Mox's rWTM firmware code.
 * This firmware is open source and its sources can be found at
 * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
 */

#define MBOX_STS_SUCCESS	(0 << 30)
#define MBOX_STS_FAIL		(1 << 30)
#define MBOX_STS_BADCMD		(2 << 30)
#define MBOX_STS_ERROR(s)	((s) & (3 << 30))
#define MBOX_STS_VALUE(s)	(((s) >> 10) & 0xfffff)
#define MBOX_STS_CMD(s)		((s) & 0x3ff)

enum mbox_cmd {
	MBOX_CMD_GET_RANDOM = 1,
	MBOX_CMD_BOARD_INFO = 2,
	MBOX_CMD_ECDSA_PUB_KEY = 3,
	MBOX_CMD_HASH = 4,
	MBOX_CMD_SIGN = 5,
	MBOX_CMD_VERIFY = 6,

	MBOX_CMD_OTP_READ = 7,
	MBOX_CMD_OTP_WRITE = 8,
};

struct mox_kobject;

struct mox_rwtm {
	struct device *dev;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox;
	struct mox_kobject *kobj;
	struct hwrng hwrng;

	struct armada_37xx_rwtm_rx_msg reply;

	void *buf;
	dma_addr_t buf_phys;

	struct mutex busy;
	struct completion cmd_done;

	/* board information */
	int has_board_info;
	u64 serial_number;
	int board_version, ram_size;
	u8 mac_address1[6], mac_address2[6];

	/* public key burned in eFuse */
	int has_pubkey;
	u8 pubkey[135];
};

struct mox_kobject {
	struct kobject kobj;
	struct mox_rwtm *rwtm;
};

static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
{
	return &rwtm->kobj->kobj;
}

static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
{
	return container_of(kobj, struct mox_kobject, kobj)->rwtm;
}

static void mox_kobj_release(struct kobject *kobj)
{
	kfree(to_rwtm(kobj)->kobj);
}

static struct kobj_type mox_kobj_ktype = {
	.release	= mox_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static int mox_kobj_create(struct mox_rwtm *rwtm)
{
	rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
	if (!rwtm->kobj)
		return -ENOMEM;

	kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
	if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
		kobject_put(rwtm_to_kobj(rwtm));
		return -ENXIO;
	}

	rwtm->kobj->rwtm = rwtm;

	return 0;
}

#define MOX_ATTR_RO(name, format, cat)				\
static ssize_t							\
name##_show(struct kobject *kobj, struct kobj_attribute *a,	\
	    char *buf)						\
{								\
	struct mox_rwtm *rwtm = to_rwtm(kobj);			\
	if (!rwtm->has_##cat)					\
		return -ENODATA;				\
	return sprintf(buf, format, rwtm->name);		\
}								\
static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)

MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
MOX_ATTR_RO(board_version, "%i\n", board_info);
MOX_ATTR_RO(ram_size, "%i\n", board_info);
MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
MOX_ATTR_RO(pubkey, "%s\n", pubkey);

static int mox_get_status(enum mbox_cmd cmd, u32 retval)
{
	if (MBOX_STS_CMD(retval) != cmd ||
	    MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
		return -EIO;
	else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
		return -(int)MBOX_STS_VALUE(retval);
	else
		return MBOX_STS_VALUE(retval);
}

static const struct attribute *mox_rwtm_attrs[] = {
	&mox_attr_serial_number.attr,
	&mox_attr_board_version.attr,
	&mox_attr_ram_size.attr,
	&mox_attr_mac_address1.attr,
	&mox_attr_mac_address2.attr,
	&mox_attr_pubkey.attr,
	NULL
};

static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
{
	struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
	struct armada_37xx_rwtm_rx_msg *msg = data;

	rwtm->reply = *msg;
	complete(&rwtm->cmd_done);
}

static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
{
	mac[0] = t1 >> 8;
	mac[1] = t1;
	mac[2] = t2 >> 24;
	mac[3] = t2 >> 16;
	mac[4] = t2 >> 8;
	mac[5] = t2;
}

static int mox_get_board_info(struct mox_rwtm *rwtm)
{
	struct armada_37xx_rwtm_tx_msg msg;
	struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
	int ret;

	msg.command = MBOX_CMD_BOARD_INFO;
	ret = mbox_send_message(rwtm->mbox, &msg);
	if (ret < 0)
		return ret;

	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
	if (ret < 0)
		return ret;

	ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
	if (ret < 0 && ret != -ENODATA) {
		return ret;
	} else if (ret == -ENODATA) {
		dev_warn(rwtm->dev,
			 "Board does not have manufacturing information burned!\n");
	} else {
		rwtm->serial_number = reply->status[1];
		rwtm->serial_number <<= 32;
		rwtm->serial_number |= reply->status[0];
		rwtm->board_version = reply->status[2];
		rwtm->ram_size = reply->status[3];
		reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
				  reply->status[5]);
		reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
				  reply->status[7]);
		rwtm->has_board_info = 1;

		pr_info("Turris Mox serial number %016llX\n",
			rwtm->serial_number);
		pr_info("           board version %i\n", rwtm->board_version);
		pr_info("      burned RAM size %i MiB\n", rwtm->ram_size);
	}

	msg.command = MBOX_CMD_ECDSA_PUB_KEY;
	ret = mbox_send_message(rwtm->mbox, &msg);
	if (ret < 0)
		return ret;

	ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
	if (ret < 0)
		return ret;

	ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
	if (ret < 0 && ret != -ENODATA) {
		return ret;
	} else if (ret == -ENODATA) {
		dev_warn(rwtm->dev, "Board has no public key burned!\n");
	} else {
		u32 *s = reply->status;

		rwtm->has_pubkey = 1;
		sprintf(rwtm->pubkey,
			"%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
			ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
			s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
	}

	return 0;
}

static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
	struct armada_37xx_rwtm_tx_msg msg;
	int ret;

	if (max > 4096)
		max = 4096;

	msg.command = MBOX_CMD_GET_RANDOM;
	msg.args[0] = 1;
	msg.args[1] = rwtm->buf_phys;
	msg.args[2] = (max + 3) & ~3;

	if (!wait) {
		if (!mutex_trylock(&rwtm->busy))
			return -EBUSY;
	} else {
		mutex_lock(&rwtm->busy);
	}

	ret = mbox_send_message(rwtm->mbox, &msg);
	if (ret < 0)
		goto unlock_mutex;

	ret = wait_for_completion_interruptible(&rwtm->cmd_done);
	if (ret < 0)
		goto unlock_mutex;

	ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
	if (ret < 0)
		goto unlock_mutex;

	memcpy(data, rwtm->buf, max);
	ret = max;

unlock_mutex:
	mutex_unlock(&rwtm->busy);
	return ret;
}

static int turris_mox_rwtm_probe(struct platform_device *pdev)
{
	struct mox_rwtm *rwtm;
	struct device *dev = &pdev->dev;
	int ret;

	rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL);
	if (!rwtm)
		return -ENOMEM;

	rwtm->dev = dev;
	rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
					GFP_KERNEL);
	if (!rwtm->buf)
		return -ENOMEM;

	ret = mox_kobj_create(rwtm);
	if (ret < 0) {
		dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
		return ret;
	}

	ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
	if (ret < 0) {
		dev_err(dev, "Cannot create sysfs files!\n");
		goto put_kobj;
	}

	platform_set_drvdata(pdev, rwtm);

	mutex_init(&rwtm->busy);

	rwtm->mbox_client.dev = dev;
	rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;

	rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
	if (IS_ERR(rwtm->mbox)) {
		ret = PTR_ERR(rwtm->mbox);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Cannot request mailbox channel: %i\n",
				ret);
		goto remove_files;
	}

	init_completion(&rwtm->cmd_done);

	ret = mox_get_board_info(rwtm);
	if (ret < 0)
		dev_warn(dev, "Cannot read board information: %i\n", ret);

	rwtm->hwrng.name = DRIVER_NAME "_hwrng";
	rwtm->hwrng.read = mox_hwrng_read;
	rwtm->hwrng.priv = (unsigned long) rwtm;
	rwtm->hwrng.quality = 1024;

	ret = devm_hwrng_register(dev, &rwtm->hwrng);
	if (ret < 0) {
		dev_err(dev, "Cannot register HWRNG: %i\n", ret);
		goto free_channel;
	}

	return 0;

free_channel:
	mbox_free_channel(rwtm->mbox);
remove_files:
	sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
put_kobj:
	kobject_put(rwtm_to_kobj(rwtm));
	return ret;
}

static int turris_mox_rwtm_remove(struct platform_device *pdev)
{
	struct mox_rwtm *rwtm = platform_get_drvdata(pdev);

	sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
	kobject_put(rwtm_to_kobj(rwtm));
	mbox_free_channel(rwtm->mbox);

	return 0;
}

static const struct of_device_id turris_mox_rwtm_match[] = {
	{ .compatible = "cznic,turris-mox-rwtm", },
	{ },
};

MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);

static struct platform_driver turris_mox_rwtm_driver = {
	.probe	= turris_mox_rwtm_probe,
	.remove	= turris_mox_rwtm_remove,
	.driver	= {
		.name		= DRIVER_NAME,
		.of_match_table	= turris_mox_rwtm_match,
	},
};
module_platform_driver(turris_mox_rwtm_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
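Once bound, the driver exposes the manufacturing data under /sys/firmware/turris-mox-rwtm/ (see the sysfs documentation added in this series) and feeds the kernel's hwrng framework via the Entropy Bit Generator. An illustrative userspace check of the serial number attribute:

	/* Userspace sketch: print the serial number exported above. */
	#include <stdio.h>

	int main(void)
	{
		char serial[32];
		FILE *f = fopen("/sys/firmware/turris-mox-rwtm/serial_number", "r");

		if (!f) {
			perror("serial_number");
			return 1;
		}
		if (fgets(serial, sizeof(serial), f))
			printf("Turris Mox serial: %s", serial);
		fclose(f);
		return 0;
	}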